Revert "Merge remote branch 'linux-2.6.32.y/master' into develop"
author: 黄涛 <huangtao@rock-chips.com>
Sat, 30 Jul 2011 08:24:52 +0000 (16:24 +0800)
committer: 黄涛 <huangtao@rock-chips.com>
Sat, 30 Jul 2011 08:24:52 +0000 (16:24 +0800)
This reverts commit 6e2688ca848b14639c6ce07ec87c51fdc1742b61, reversing
changes made to 4b7c9de4deb80eb1e1b154112a5e6cb2d3d79f96.

Conflicts:

drivers/gpio/wm831x-gpio.c
drivers/i2c/busses/Kconfig
drivers/net/dm9000.c
drivers/net/wireless/airo.c
drivers/net/wireless/ath/ar9170/hw.h
drivers/net/wireless/ath/ar9170/main.c
drivers/net/wireless/ath/ar9170/usb.c
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/attach.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/qcu.c
drivers/net/wireless/ath/ath5k/reset.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/initvals.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/phy.h
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/regd.h
drivers/net/wireless/b43/Kconfig
drivers/net/wireless/b43/Makefile
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/pio.c
drivers/net/wireless/b43/pio.h
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/hostap/hostap_cs.c
drivers/net/wireless/hostap/hostap_hw.c
drivers/net/wireless/hostap/hostap_pci.c
drivers/net/wireless/hostap/hostap_wlan.h
drivers/net/wireless/iwlwifi/iwl-3945.c
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-helpers.h
drivers/net/wireless/iwlwifi/iwl-rx.c
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/net/wireless/libertas/if_sdio1.c
drivers/net/wireless/p54/eeprom.c
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rtl818x/rtl8180_dev.c
drivers/net/wireless/wl12xx/wl1251_debugfs.c
drivers/net/wireless/wl12xx/wl1251_sdio.c
drivers/usb/serial/option.c
net/bluetooth/rfcomm/core.c

1091 files changed:
Documentation/filesystems/proc.txt
Documentation/filesystems/tmpfs.txt
Documentation/hwmon/ltc4245
Documentation/i2c/busses/i2c-i801
Documentation/kernel-parameters.txt
Documentation/laptops/thinkpad-acpi.txt
Documentation/networking/3c509.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/kernel/err_marvel.c
arch/arm/Kconfig
arch/arm/boot/compressed/head.S
arch/arm/boot/compressed/vmlinux.lds.in
arch/arm/common/sa1111.c
arch/arm/include/asm/assembler.h
arch/arm/include/asm/ptrace.h
arch/arm/include/asm/tlbflush.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/kprobes-decode.c
arch/arm/lib/findbit.S
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-pxa/include/mach/colibri.h
arch/arm/mach-realview/Kconfig
arch/arm/mach-realview/include/mach/barriers.h [deleted file]
arch/arm/mm/copypage-feroceon.c
arch/arm/mm/copypage-v4wb.c
arch/arm/mm/copypage-v4wt.c
arch/arm/mm/copypage-xsc3.c
arch/arm/mm/fault.c
arch/arm/plat-mxc/gpio.c
arch/arm/plat-mxc/include/mach/gpio.h
arch/arm/vfp/vfphw.S
arch/blackfin/include/asm/cache.h
arch/frv/include/asm/cache.h
arch/ia64/hp/common/sba_iommu.c
arch/ia64/include/asm/acpi.h
arch/ia64/include/asm/compat.h
arch/ia64/kernel/fsys.S
arch/ia64/kernel/msi_ia64.c
arch/ia64/kernel/time.c
arch/ia64/kvm/kvm-ia64.c
arch/ia64/mm/tlb.c
arch/ia64/sn/kernel/msi_sn.c
arch/m68k/include/asm/cache.h
arch/microblaze/Makefile
arch/mips/include/asm/atomic.h
arch/mips/include/asm/compat.h
arch/mips/include/asm/mach-sibyte/war.h
arch/mips/include/asm/mipsregs.h
arch/mips/math-emu/cp1emu.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm.c
arch/mips/mm/uasm.h
arch/mips/mti-malta/malta-pci.c
arch/mips/nxp/pnx8550/common/pci.c
arch/mips/nxp/pnx8550/common/setup.c
arch/mips/pci/ops-pmcmsp.c
arch/mips/pci/pci-yosemite.c
arch/mips/sibyte/sb1250/setup.c
arch/mn10300/include/asm/cache.h
arch/parisc/include/asm/compat.h
arch/parisc/kernel/firmware.c
arch/parisc/math-emu/decode_exc.c
arch/powerpc/Makefile
arch/powerpc/include/asm/compat.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/ppc-pci.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/ppc970-pmu.c
arch/powerpc/kernel/time.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/lib/string.S
arch/powerpc/mm/fsl_booke_mmu.c
arch/powerpc/oprofile/op_model_cell.c
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/eeh_driver.c
arch/powerpc/platforms/pseries/eeh_event.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/plpar_wrappers.h
arch/powerpc/platforms/pseries/smp.c
arch/s390/include/asm/compat.h
arch/s390/include/asm/cputime.h
arch/s390/kernel/nmi.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/s390_ext.c
arch/s390/kernel/time.c
arch/s390/kernel/vtime.c
arch/s390/kvm/kvm-s390.c
arch/s390/lib/delay.c
arch/sh/boot/compressed/misc.c
arch/sh/include/asm/elf.h
arch/sh/kernel/smp.c
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/compat.h
arch/sparc/include/asm/io_32.h
arch/sparc/include/asm/io_64.h
arch/sparc/include/asm/oplib_64.h
arch/sparc/include/asm/page_32.h
arch/sparc/include/asm/parport.h
arch/sparc/include/asm/rwsem-const.h
arch/sparc/include/asm/stat.h
arch/sparc/kernel/central.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/tsb.S
arch/sparc/prom/cif.S
arch/sparc/prom/console_64.c
arch/sparc/prom/devops_64.c
arch/sparc/prom/misc_64.c
arch/sparc/prom/p1275.c
arch/sparc/prom/tree_64.c
arch/um/drivers/line.c
arch/um/drivers/ubd_kern.c
arch/um/kernel/uml.lds.S
arch/um/os-Linux/time.c
arch/um/sys-x86_64/Makefile
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/ia32/ia32_aout.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/amd_iommu_types.h
arch/x86/include/asm/cmpxchg_32.h
arch/x86/include/asm/cmpxchg_64.h
arch/x86/include/asm/compat.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/io.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/k8.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/rwsem.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/suspend_32.h
arch/x86/include/asm/suspend_64.h
arch/x86/include/asm/system.h
arch/x86/include/asm/trampoline.h
arch/x86/include/asm/tsc.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/probe_32.c
arch/x86/kernel/apic/probe_64.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/crash.c
arch/x86/kernel/crash_dump_64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/hpet.c
arch/x86/kernel/k8.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/olpc.c
arch/x86/kernel/pci-calgary_64.c
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/process.c
arch/x86/kernel/process_64.c
arch/x86/kernel/pvclock.c
arch/x86/kernel/quirks.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tboot.c
arch/x86/kernel/trampoline.c
arch/x86/kernel/tsc.c
arch/x86/kernel/vsyscall_64.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/Makefile
arch/x86/lib/cache-smp.c [deleted file]
arch/x86/lib/rwsem_64.S [deleted file]
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/mm/pgtable.c
arch/x86/oprofile/nmi_int.c
arch/x86/oprofile/op_model_amd.c
arch/x86/oprofile/op_model_p4.c
arch/x86/oprofile/op_model_ppro.c
arch/x86/pci/irq.c
arch/x86/power/cpu.c
arch/x86/power/hibernate_asm_32.S
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/smp.c
arch/x86/xen/suspend.c
arch/x86/xen/time.c
arch/xtensa/include/asm/cache.h
block/blk-map.c
block/blk-settings.c
block/blk-timeout.c
block/bsg.c
block/scsi_ioctl.c
crypto/async_tx/async_raid6_recov.c
crypto/authenc.c
crypto/testmgr.c
drivers/Makefile
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/exprep.c
drivers/acpi/blacklist.c
drivers/acpi/ec.c
drivers/acpi/power_meter.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/acpi/sleep.c
drivers/acpi/tables.c
drivers/acpi/video_detect.c
drivers/ata/ahci.c
drivers/ata/ata_generic.c
drivers/ata/ata_piix.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata-sff.c
drivers/ata/pata_ali.c
drivers/ata/pata_hpt3x2n.c
drivers/ata/pata_pdc202xx_old.c
drivers/ata/pata_via.c
drivers/ata/sata_mv.c
drivers/ata/sata_nv.c
drivers/ata/sata_via.c
drivers/atm/solos-pci.c
drivers/base/core.c
drivers/base/cpu.c
drivers/base/devtmpfs.c
drivers/base/firmware_class.c
drivers/block/loop.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_ldisc.c
drivers/char/agp/Kconfig
drivers/char/agp/amd64-agp.c
drivers/char/agp/hp-agp.c
drivers/char/agp/intel-agp.c
drivers/char/agp/sis-agp.c
drivers/char/hpet.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mem.c
drivers/char/nvram.c
drivers/char/pcmcia/synclink_cs.c
drivers/char/raw.c
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm_tis.c
drivers/char/tty_buffer.c
drivers/char/tty_io.c
drivers/char/tty_ldisc.c
drivers/char/vt_ioctl.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/sh_tmu.c
drivers/cpufreq/cpufreq.c
drivers/cpuidle/governors/menu.c
drivers/crypto/padlock-aes.c
drivers/dma/mv_xor.c
drivers/edac/amd64_edac.c
drivers/edac/edac_mce_amd.c
drivers/firewire/core-card.c
drivers/firewire/core-cdev.c
drivers/firewire/core-device.c
drivers/firewire/ohci.c
drivers/gpio/wm831x-gpio.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/i915/dvo_tfp410.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cp.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_legacy_tv.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/vga/vgaarb.c
drivers/hid/hid-core.c
drivers/hid/hid-gyration.c
drivers/hid/hid-ids.h
drivers/hid/hidraw.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/usbhid.h
drivers/hwmon/ams/ams-core.c
drivers/hwmon/ams/ams-i2c.c
drivers/hwmon/ams/ams-pmu.c
drivers/hwmon/ams/ams.h
drivers/hwmon/coretemp.c
drivers/hwmon/f75375s.c
drivers/hwmon/hp_accel.c
drivers/hwmon/it87.c
drivers/hwmon/k8temp.c
drivers/hwmon/lis3lv02d.c
drivers/hwmon/lis3lv02d.h
drivers/hwmon/lm85.c
drivers/hwmon/ltc4245.c
drivers/hwmon/pc87360.c
drivers/hwmon/sht15.c
drivers/hwmon/tmp421.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-pca-isa.c
drivers/i2c/busses/i2c-pca-platform.c
drivers/i2c/i2c-core.c
drivers/ide/cmd640.c
drivers/ide/ide-cd.c
drivers/ide/ide-taskfile.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/input/input.c
drivers/input/joydev.c
drivers/input/keyboard/twl4030_keypad.c
drivers/input/mouse/alps.c
drivers/input/mouse/psmouse-base.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/input/tablet/wacom.h
drivers/input/tablet/wacom_sys.c
drivers/isdn/gigaset/ev-layer.c
drivers/isdn/gigaset/interface.c
drivers/isdn/sc/ioctl.c
drivers/leds/leds-gpio.c
drivers/macintosh/therm_adt746x.c
drivers/md/bitmap.c
drivers/md/bitmap.h
drivers/md/dm-exception-store.h
drivers/md/dm-ioctl.c
drivers/md/dm-mpath.c
drivers/md/dm.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb/dvb-core/dvb_net.c
drivers/media/dvb/dvb-usb/Kconfig
drivers/media/dvb/frontends/l64781.c
drivers/media/dvb/ttpci/Kconfig
drivers/media/dvb/ttpci/budget.c
drivers/media/video/bt8xx/bttv-driver.c
drivers/media/video/bt8xx/bttv-i2c.c
drivers/media/video/bt8xx/bttvp.h
drivers/media/video/cx231xx/cx231xx-cards.c
drivers/media/video/cx23885/cx23885-i2c.c
drivers/media/video/cx88/cx88-i2c.c
drivers/media/video/em28xx/em28xx-dvb.c
drivers/media/video/gspca/mr97310a.c
drivers/media/video/gspca/stv06xx/stv06xx.c
drivers/media/video/ivtv/ivtvfb.c
drivers/media/video/pwc/pwc-ctrl.c
drivers/media/video/saa7134/saa7134-core.c
drivers/media/video/saa7134/saa7134-ts.c
drivers/media/video/uvc/uvc_ctrl.c
drivers/media/video/uvc/uvc_driver.c
drivers/media/video/uvc/uvcvideo.h
drivers/media/video/v4l2-compat-ioctl32.c
drivers/memstick/core/mspro_block.c
drivers/message/fusion/mptctl.c
drivers/message/fusion/mptscsih.c
drivers/misc/enclosure.c
drivers/misc/sgi-xp/xpc_partition.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/tmio_mmc.c
drivers/mmc/host/tmio_mmc.h
drivers/mtd/nand/pxa3xx_nand.c
drivers/net/3c503.c
drivers/net/arcnet/com20020-pci.c
drivers/net/atl1e/atl1e_ethtool.c
drivers/net/atlx/atl1.c
drivers/net/b44.c
drivers/net/bnx2.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/sja1000/sja1000.h
drivers/net/cpmac.c
drivers/net/cxgb3/ael1002.c
drivers/net/cxgb3/cxgb3_main.c
drivers/net/dm9000.c
drivers/net/e1000e/hw.h
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/netdev.c
drivers/net/eql.c
drivers/net/forcedeth.c
drivers/net/gianfar.c
drivers/net/igb/e1000_82575.c
drivers/net/igb/e1000_hw.h
drivers/net/igb/e1000_mac.c
drivers/net/igb/igb_main.c
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_type.h
drivers/net/jme.c
drivers/net/ks8851_mll.c
drivers/net/mlx4/icm.c
drivers/net/netxen/netxen_nic_init.c
drivers/net/pppol2tp.c
drivers/net/r6040.c
drivers/net/r8169.c
drivers/net/skge.c
drivers/net/sky2.c
drivers/net/smsc911x.c
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/tulip/Kconfig
drivers/net/tulip/dmfe.c
drivers/net/tulip/tulip_core.c
drivers/net/tun.c
drivers/net/ucc_geth.c
drivers/net/usb/asix.c
drivers/net/usb/dm9601.c
drivers/net/usb/hso.c
drivers/net/via-rhine.c
drivers/net/via-velocity.c
drivers/net/virtio_net.c
drivers/net/xen-netfront.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/cpu_buffer.c
drivers/parisc/led.c
drivers/pci/hotplug/ibmphp_ebda.c
drivers/pci/intel-iommu.c
drivers/pci/msi.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/aer/aer_inject.c
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/proc.c
drivers/pci/quirks.c
drivers/pcmcia/pcmcia_resource.c
drivers/platform/x86/Kconfig
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/apm_power.c
drivers/power/olpc_battery.c
drivers/rtc/class.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-coh901331.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-s3c.c
drivers/s390/cio/cio.c
drivers/scsi/aacraid/commctrl.c
drivers/scsi/aic7xxx/aic79xx_core.c
drivers/scsi/arm/fas216.c
drivers/scsi/gdth.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/megaraid/megaraid_sas.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/qla1280.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_ioctl.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sd.c
drivers/scsi/ses.c
drivers/serial/8250_pnp.c
drivers/serial/cpm_uart/cpm_uart_core.c
drivers/serial/imx.c
drivers/ssb/b43_pci_bridge.c
drivers/ssb/driver_chipcommon.c
drivers/ssb/pci.c
drivers/ssb/sprom.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/asus_oled/asus_oled.c
drivers/staging/comedi/Kconfig
drivers/staging/comedi/drivers/Makefile
drivers/staging/comedi/drivers/ni_mio_cs.c
drivers/staging/comedi/drivers/usbdux.c
drivers/staging/frontier/tranzport.c
drivers/staging/hv/Hv.c
drivers/staging/hv/RingBuffer.c
drivers/staging/hv/RndisFilter.c
drivers/staging/hv/StorVscApi.h
drivers/staging/hv/netvsc_drv.c
drivers/staging/hv/storvsc_drv.c
drivers/staging/hv/vmbus_drv.c
drivers/staging/line6/Kconfig
drivers/staging/line6/control.c
drivers/staging/line6/midi.c
drivers/staging/line6/pod.c
drivers/staging/line6/toneport.c
drivers/staging/line6/variax.c
drivers/staging/mimio/Kconfig [new file with mode: 0644]
drivers/staging/mimio/Makefile [new file with mode: 0644]
drivers/staging/mimio/mimio.c [new file with mode: 0644]
drivers/staging/panel/panel.c
drivers/staging/rt2860/common/2860_rtmp_init.c
drivers/staging/rtl8187se/r8185b_init.c
drivers/staging/rtl8192su/r8192U_core.c
drivers/staging/usbip/usbip_event.c
drivers/staging/usbip/vhci_hcd.c
drivers/staging/vt6655/device_main.c
drivers/staging/vt6655/wpactl.c
drivers/usb/atm/ueagle-atm.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/core/devio.c
drivers/usb/core/driver.c
drivers/usb/core/file.c
drivers/usb/core/generic.c
drivers/usb/core/hcd.c
drivers/usb/core/hcd.h
drivers/usb/core/hub.c
drivers/usb/core/inode.c
drivers/usb/core/message.c
drivers/usb/core/quirks.c
drivers/usb/core/urb.c
drivers/usb/core/usb.c
drivers/usb/gadget/atmel_usba_udc.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/u_serial.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-mem.c
drivers/usb/host/ehci-ppc-of.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci.h
drivers/usb/host/ohci-hub.c
drivers/usb/host/ohci-pnx4008.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/host/uhci-hcd.c
drivers/usb/host/xhci-ext-caps.h
drivers/usb/host/xhci-hcd.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/misc/cypress_cy7c63.c
drivers/usb/misc/iowarrior.c
drivers/usb/misc/sisusbvga/sisusb.c
drivers/usb/misc/trancevibrator.c
drivers/usb/misc/usbled.c
drivers/usb/misc/usbsevseg.c
drivers/usb/misc/usbtest.c
drivers/usb/mon/mon_bin.c
drivers/usb/musb/blackfin.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_gadget.h
drivers/usb/musb/musb_gadget_ep0.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio.h
drivers/usb/serial/ftdi_sio_ids.h [deleted file]
drivers/usb/serial/io_ti.c
drivers/usb/serial/ir-usb.c
drivers/usb/serial/kl5kusb105.c
drivers/usb/serial/kobil_sct.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/navman.c
drivers/usb/serial/opticon.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/qcserial.c
drivers/usb/serial/sierra.c
drivers/usb/serial/visor.c
drivers/usb/storage/sierra_ms.c
drivers/usb/storage/unusual_devs.h
drivers/video/backlight/backlight.c
drivers/video/backlight/mbp_nvidia_bl.c
drivers/video/bfin-t350mcqb-fb.c
drivers/video/efifb.c
drivers/video/offb.c
drivers/video/sis/sis_main.c
drivers/video/sunxvr500.c
drivers/video/via/accel.c
drivers/video/via/ioctl.c
drivers/video/w100fb.c
drivers/virtio/virtio_pci.c
drivers/w1/slaves/w1_therm.c
drivers/watchdog/bfin_wdt.c
drivers/watchdog/hpwdt.c
drivers/watchdog/iTCO_wdt.c
drivers/xen/events.c
drivers/xen/xenbus/xenbus_xs.c
firmware/Makefile
fs/9p/vfs_file.c
fs/aio.c
fs/binfmt_misc.c
fs/bio.c
fs/block_dev.c
fs/btrfs/acl.c
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/dir-item.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/relocation.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/xattr.c
fs/btrfs/xattr.h
fs/cachefiles/security.c
fs/char_dev.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/dir.c
fs/cifs/dns_resolve.c
fs/cifs/dns_resolve.h
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/sess.c
fs/compat.c
fs/dlm/ast.c
fs/dlm/ast.h
fs/dlm/config.c
fs/dlm/debug_fs.c
fs/dlm/dir.c
fs/dlm/dlm_internal.h
fs/dlm/lock.c
fs/dlm/lockspace.c
fs/dlm/lowcomms.c
fs/dlm/member.c
fs/dlm/memory.c
fs/dlm/netlink.c
fs/dlm/plock.c
fs/dlm/rcom.c
fs/dlm/requestqueue.c
fs/dlm/user.c
fs/dlm/user.h
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/messaging.c
fs/ecryptfs/super.c
fs/exec.c
fs/exofs/dir.c
fs/ext3/super.c
fs/ext3/xattr.c
fs/ext4/balloc.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/fsync.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/mballoc.h
fs/ext4/migrate.c
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fat/namei_vfat.c
fs/file_table.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/file.c
fs/gfs2/acl.c
fs/gfs2/acl.h
fs/gfs2/dir.c
fs/gfs2/file.c
fs/gfs2/xattr.c
fs/jbd/journal.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jfs/resize.c
fs/jfs/xattr.c
fs/libfs.c
fs/namei.c
fs/namespace.c
fs/nfs/client.c
fs/nfs/delegation.h
fs/nfs/dir.c
fs/nfs/dns_resolve.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/pagelist.c
fs/nfs/super.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfssvc.c
fs/nilfs2/super.c
fs/notify/inotify/inotify_fsnotify.c
fs/notify/inotify/inotify_user.c
fs/ocfs2/acl.c
fs/ocfs2/alloc.c
fs/ocfs2/alloc.h
fs/ocfs2/aops.c
fs/ocfs2/buffer_head_io.c
fs/ocfs2/dlm/dlmfs.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlm/dlmthread.c
fs/ocfs2/inode.c
fs/ocfs2/locks.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/ocfs2/super.c
fs/ocfs2/symlink.c
fs/partitions/ibm.c
fs/partitions/msdos.c
fs/pipe.c
fs/proc/array.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/quota/dquot.c
fs/reiserfs/dir.c
fs/reiserfs/journal.c
fs/reiserfs/xattr.c
fs/reiserfs/xattr_security.c
fs/signalfd.c
fs/splice.c
fs/sysfs/file.c
fs/xfs/linux-2.6/xfs_acl.c
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/linux-2.6/xfs_sync.h
fs/xfs/quota/xfs_qm_bhv.c
fs/xfs/quota/xfs_qm_syscalls.c
fs/xfs/xfs_ag.h
fs/xfs/xfs_alloc.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_iget.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_rw.h
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans.h
fs/xfs/xfs_vnodeops.c
fs/xfs/xfs_vnodeops.h
include/acpi/processor.h
include/asm-generic/dma-mapping-common.h
include/drm/drm_pciids.h
include/linux/ata.h
include/linux/blkdev.h
include/linux/clocksource.h
include/linux/compat.h
include/linux/cpuset.h
include/linux/decompress/mm.h
include/linux/ethtool.h
include/linux/fb.h
include/linux/firmware.h
include/linux/freezer.h
include/linux/fs.h
include/linux/hrtimer.h
include/linux/ieee80211.h
include/linux/if_tunnel.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/jbd2.h
include/linux/kvm_host.h
include/linux/lcm.h [deleted file]
include/linux/libata.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/msi.h
include/linux/netdevice.h
include/linux/nfs_fs_sb.h
include/linux/notifier.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/poison.h
include/linux/quotaops.h
include/linux/reiserfs_xattr.h
include/linux/resource.h
include/linux/sched.h
include/linux/ssb/ssb.h
include/linux/ssb/ssb_driver_chipcommon.h
include/linux/ssb/ssb_regs.h
include/linux/swap.h
include/linux/syscalls.h
include/linux/tboot.h
include/linux/tick.h
include/linux/topology.h
include/linux/tty.h
include/linux/usb/quirks.h
include/linux/vmstat.h
include/linux/writeback.h
include/math-emu/op-common.h
include/net/mac80211.h
include/net/sctp/structs.h
include/net/tcp.h
include/net/x25.h
include/scsi/scsi_bsg_fc.h
include/sound/emu10k1.h
include/trace/ftrace.h
init/initramfs.c
init/main.c
ipc/compat.c
ipc/compat_mq.c
ipc/mqueue.c
ipc/sem.c
ipc/shm.c
kernel/cgroup_freezer.c
kernel/compat.c
kernel/cpu.c
kernel/cpuset.c
kernel/cred.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/gcov/fs.c
kernel/groups.c
kernel/hrtimer.c
kernel/irq/chip.c
kernel/irq/manage.c
kernel/kthread.c
kernel/latencytop.c
kernel/module.c
kernel/mutex.c
kernel/perf_event.c
kernel/posix-timers.c
kernel/power/process.c
kernel/power/snapshot.c
kernel/profile.c
kernel/sched.c
kernel/sched_debug.c
kernel/sched_fair.c
kernel/sched_idletask.c
kernel/sched_rt.c
kernel/signal.c
kernel/slow-work.c
kernel/softlockup.c
kernel/sys.c
kernel/time/clocksource.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/time/timer_list.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
lib/Makefile
lib/flex_array.c
lib/idr.c
lib/lcm.c [deleted file]
lib/percpu_counter.c
mm/bounce.c
mm/fadvise.c
mm/filemap.c
mm/hugetlb.c
mm/internal.h
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/mlock.c
mm/mmap.c
mm/mmzone.c
mm/mprotect.c
mm/nommu.c
mm/oom_kill.c
mm/page_alloc.c
mm/percpu.c
mm/readahead.c
mm/slab.c
mm/swapfile.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
net/9p/trans_fd.c
net/bluetooth/l2cap.c
net/bluetooth/rfcomm/core.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bridge/br_netfilter.c
net/can/bcm.c
net/compat.c
net/core/dev.c
net/core/ethtool.c
net/core/iovec.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/scm.c
net/core/skbuff.c
net/core/stream.c
net/dccp/probe.c
net/decnet/af_decnet.c
net/econet/af_econet.c
net/ipv4/devinet.c
net/ipv4/igmp.c
net/ipv4/ip_output.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/route.c
net/irda/af_irda.c
net/irda/iriap.c
net/irda/irlan/irlan_common.c
net/irda/parameters.c
net/llc/af_llc.c
net/mac80211/Kconfig
net/mac80211/agg-tx.c
net/mac80211/ieee80211_i.h
net/mac80211/main.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/nf_conntrack_core.c
net/netfilter/xt_recent.c
net/netlink/af_netlink.c
net/phonet/af_phonet.c
net/phonet/pep.c
net/phonet/pn_dev.c
net/phonet/pn_netlink.c
net/rds/page.c
net/rds/rdma.c
net/rds/recv.c
net/rose/af_rose.c
net/sched/act_gact.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_police.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/sch_generic.c
net/sctp/output.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/socket.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/rpc_pipe.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth_unix.c
net/sunrpc/svcsock.c
net/sunrpc/xprtsock.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/net.c
net/unix/af_unix.c
net/wireless/core.h
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/util.c
net/wireless/wext-compat.c
net/wireless/wext.c
net/x25/af_x25.c
net/x25/x25_facilities.c
net/x25/x25_in.c
scripts/mkmakefile
scripts/mod/modpost.c
security/inode.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/process_keys.c
security/keys/request_key.c
security/min_addr.c
security/selinux/ss/ebitmap.c
sound/core/control.c
sound/core/pcm_native.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_init.c
sound/pci/ac97/ac97_patch.c
sound/pci/cmipci.c
sound/pci/echoaudio/echoaudio.c
sound/pci/emu10k1/emu10k1.c
sound/pci/emu10k1/emupcm.c
sound/pci/emu10k1/memory.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/ice1712/maya44.c
sound/pci/intel8x0.c
sound/pci/maestro3.c
sound/pci/mixart/mixart.c
sound/pci/oxygen/oxygen.c
sound/pci/riptide/riptide.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/pci/via82xx.c
sound/soc/codecs/ak4104.c
sound/soc/codecs/wm8350.c
sound/soc/codecs/wm8400.c
sound/soc/codecs/wm8580.c
sound/soc/codecs/wm8776.c
sound/soc/codecs/wm8990.c
sound/usb/usbaudio.c
sound/usb/usbmidi.c
sound/usb/usbquirks.h
tools/perf/Documentation/Makefile
tools/perf/Makefile
tools/perf/util/callchain.h
virt/kvm/kvm_main.c

index 5c86fe95555ab468d044259d41b8cbd0a1d687ab..2c48f945546b4dc3f6ebfa0463687445429ad5cb 100644 (file)
@@ -176,6 +176,7 @@ read the file /proc/PID/status:
   CapBnd: ffffffffffffffff
   voluntary_ctxt_switches:        0
   nonvoluntary_ctxt_switches:     1
+  Stack usage:    12 kB
 
 This shows you nearly the same information you would get if you viewed it with
 the ps  command.  In  fact,  ps  uses  the  proc  file  system  to  obtain its
@@ -229,6 +230,7 @@ Table 1-2: Contents of the statm files (as of 2.6.30-rc7)
  Mems_allowed_list           Same as previous, but in "list format"
  voluntary_ctxt_switches     number of voluntary context switches
  nonvoluntary_ctxt_switches  number of non voluntary context switches
+ Stack usage:                stack usage high water mark (round up to page size)
 ..............................................................................
 
 Table 1-3: Contents of the statm files (as of 2.6.8-rc3)
@@ -307,7 +309,7 @@ address           perms offset  dev   inode      pathname
 08049000-0804a000 rw-p 00001000 03:00 8312       /opt/test
 0804a000-0806b000 rw-p 00000000 00:00 0          [heap]
 a7cb1000-a7cb2000 ---p 00000000 00:00 0
-a7cb2000-a7eb2000 rw-p 00000000 00:00 0
+a7cb2000-a7eb2000 rw-p 00000000 00:00 0          [threadstack:001ff4b4]
 a7eb2000-a7eb3000 ---p 00000000 00:00 0
 a7eb3000-a7ed5000 rw-p 00000000 00:00 0
 a7ed5000-a8008000 r-xp 00000000 03:00 4222       /lib/libc.so.6
@@ -343,6 +345,7 @@ is not associated with a file:
  [stack]                  = the stack of the main process
  [vdso]                   = the "virtual dynamic shared object",
                             the kernel system call handler
+ [threadstack:xxxxxxxx]   = the stack of the thread, xxxxxxxx is the stack size
 
  or if empty, the mapping is anonymous.
 
index fe09a2cb1858de038b39746bdc862af3f6dc21bc..3015da0c6b2a253c4a1b65559a8a8ec82b24e3e9 100644 (file)
@@ -82,13 +82,11 @@ tmpfs has a mount option to set the NUMA memory allocation policy for
 all files in that instance (if CONFIG_NUMA is enabled) - which can be
 adjusted on the fly via 'mount -o remount ...'
 
-mpol=default             use the process allocation policy
-                         (see set_mempolicy(2))
+mpol=default             prefers to allocate memory from the local node
 mpol=prefer:Node         prefers to allocate memory from the given Node
 mpol=bind:NodeList       allocates memory only from nodes in NodeList
 mpol=interleave          prefers to allocate from each node in turn
 mpol=interleave:NodeList allocates from each node of NodeList in turn
-mpol=local              prefers to allocate memory from the local node
 
 NodeList format is a comma-separated list of decimal numbers and ranges,
 a range being two hyphen-separated decimal numbers, the smallest and
@@ -136,5 +134,3 @@ Author:
    Christoph Rohland <cr@sap.com>, 1.12.01
 Updated:
    Hugh Dickins, 4 June 2007
-Updated:
-   KOSAKI Motohiro, 16 Mar 2010
index 86b5880d8502b123e508c94330e61ea255db1292..02838a47d86210fa8bee469f595c94851594bd44 100644 (file)
@@ -72,7 +72,9 @@ in6_min_alarm         5v  output undervoltage alarm
 in7_min_alarm          3v  output undervoltage alarm
 in8_min_alarm          Vee (-12v) output undervoltage alarm
 
-in9_input              GPIO voltage data
+in9_input              GPIO #1 voltage data
+in10_input             GPIO #2 voltage data
+in11_input             GPIO #3 voltage data
 
 power1_input           12v power usage (mW)
 power2_input           5v  power usage (mW)
index e1bb5b261693ead30b736a214c969b20e833f2bc..81c0c59a60eae20da2bd33a8037fdceb002f7fa7 100644 (file)
@@ -15,8 +15,7 @@ Supported adapters:
   * Intel 82801I (ICH9)
   * Intel EP80579 (Tolapai)
   * Intel 82801JI (ICH10)
-  * Intel 3400/5 Series (PCH)
-  * Intel Cougar Point (PCH)
+  * Intel PCH
    Datasheets: Publicly available at the Intel website
 
 Authors: 
index 5f6aa11fb457b9e36610a1e4a5fd2cd2cd1d6768..5bc4eaaa5b7fb302d51548c385b25ccd5569c4b3 100644 (file)
@@ -241,7 +241,7 @@ and is between 256 and 4096 characters. It is defined in the file
 
        acpi_sleep=     [HW,ACPI] Sleep options
                        Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
-                                 old_ordering, s4_nonvs, sci_force_enable }
+                                 old_ordering, s4_nonvs }
                        See Documentation/power/video.txt for information on
                        s3_bios and s3_mode.
                        s3_beep is for debugging; it makes the PC's speaker beep
@@ -254,9 +254,6 @@ and is between 256 and 4096 characters. It is defined in the file
                        of _PTS is used by default).
                        s4_nonvs prevents the kernel from saving/restoring the
                        ACPI NVS memory during hibernation.
-                       sci_force_enable causes the kernel to set SCI_EN directly
-                       on resume from S1/S3 (which is against the ACPI spec,
-                       but some broken systems don't work without it).
 
        acpi_use_timer_override [HW,ACPI]
                        Use timer override. For some broken Nvidia NF5 boards
@@ -2671,13 +2668,6 @@ and is between 256 and 4096 characters. It is defined in the file
                                        medium is write-protected).
                        Example: quirks=0419:aaf5:rl,0421:0433:rc
 
-       userpte=
-                       [X86] Flags controlling user PTE allocations.
-
-                               nohigh = do not allocate PTE pages in
-                                       HIGHMEM regardless of setting
-                                       of CONFIG_HIGHPTE.
-
        vdso=           [X86,SH]
                        vdso=2: enable compat VDSO (default with COMPAT_VDSO)
                        vdso=1: enable VDSO (default)
index 387eb9c6bf5d26979181b633b34b64bc8265bd7a..aafcaa6341915bb001c27d1bd05006add6606224 100644 (file)
@@ -460,8 +460,6 @@ event       code    Key             Notes
                                For Lenovo ThinkPads with a new
                                BIOS, it has to be handled either
                                by the ACPI OSI, or by userspace.
-                               The driver does the right thing,
-                               never mess with this.
 0x1011 0x10    FN+END          Brightness down.  See brightness
                                up for details.
 
@@ -584,15 +582,46 @@ with hotkey_report_mode.
 
 Brightness hotkey notes:
 
-Don't mess with the brightness hotkeys in a Thinkpad.  If you want
-notifications for OSD, use the sysfs backlight class event support.
+These are the current sane choices for brightness key mapping in
+thinkpad-acpi:
 
-The driver will issue KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN events
-automatically for the cases were userspace has to do something to
-implement brightness changes.  When you override these events, you will
-either fail to handle properly the ThinkPads that require explicit
-action to change backlight brightness, or the ThinkPads that require
-that no action be taken to work properly.
+For IBM and Lenovo models *without* ACPI backlight control (the ones on
+which thinkpad-acpi will autoload its backlight interface by default,
+and on which ACPI video does not export a backlight interface):
+
+1. Don't enable or map the brightness hotkeys in thinkpad-acpi, as
+   these older firmware versions unfortunately won't respect the hotkey
+   mask for brightness keys anyway, and always reacts to them.  This
+   usually work fine, unless X.org drivers are doing something to block
+   the BIOS.  In that case, use (3) below.  This is the default mode of
+   operation.
+
+2. Enable the hotkeys, but map them to something else that is NOT
+   KEY_BRIGHTNESS_UP/DOWN or any other keycode that would cause
+   userspace to try to change the backlight level, and use that as an
+   on-screen-display hint.
+
+3. IF AND ONLY IF X.org drivers find a way to block the firmware from
+   automatically changing the brightness, enable the hotkeys and map
+   them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN, and feed that to
+   something that calls xbacklight.  thinkpad-acpi will not be able to
+   change brightness in that case either, so you should disable its
+   backlight interface.
+
+For Lenovo models *with* ACPI backlight control:
+
+1. Load up ACPI video and use that.  ACPI video will report ACPI
+   events for brightness change keys.  Do not mess with thinkpad-acpi
+   defaults in this case.  thinkpad-acpi should not have anything to do
+   with backlight events in a scenario where ACPI video is loaded:
+   brightness hotkeys must be disabled, and the backlight interface is
+   to be kept disabled as well.  This is the default mode of operation.
+
+2. Do *NOT* load up ACPI video, enable the hotkeys in thinkpad-acpi,
+   and map them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN.  Process
+   these keys on userspace somehow (e.g. by calling xbacklight).
+   The driver will do this automatically if it detects that ACPI video
+   has been disabled.
 
 
 Bluetooth
@@ -650,10 +679,6 @@ LCD, CRT or DVI (if available). The following commands are available:
        echo expand_toggle > /proc/acpi/ibm/video
        echo video_switch > /proc/acpi/ibm/video
 
-NOTE: Access to this feature is restricted to processes owning the
-CAP_SYS_ADMIN capability for safety reasons, as it can interact badly
-enough with some versions of X.org to crash it.
-
 Each video output device can be enabled or disabled individually.
 Reading /proc/acpi/ibm/video shows the status of each device.
 
@@ -1440,5 +1465,3 @@ Sysfs interface changelog:
                and it is always able to disable hot keys.  Very old
                thinkpads are properly supported.  hotkey_bios_mask
                is deprecated and marked for removal.
-
-0x020600:      Marker for backlight change event support.
index 3c45d5dcd63b692c9719d0d37a37b1c809bfa7d4..0643e3b7168cccb44acf65735c71beb5e273845c 100644 (file)
@@ -48,11 +48,11 @@ for LILO parameters for doing this:
 This configures the first found 3c509 card for IRQ 10, base I/O 0x310, and
 transceiver type 3 (10base2). The flag "0x3c509" must be set to avoid conflicts
 with other card types when overriding the I/O address. When the driver is
-loaded as a module, only the IRQ may be overridden. For example,
-setting two cards to IRQ10 and IRQ11 is done by using the irq module
-option:
+loaded as a module, only the IRQ and transceiver setting may be overridden.
+For example, setting two cards to 10base2/IRQ10 and AUI/IRQ11 is done by using
+the xcvr and irq module options:
 
-   options 3c509 irq=10,11
+   options 3c509 xcvr=3,1 irq=10,11
 
 
 (2) Full-duplex mode
@@ -77,8 +77,6 @@ operation.
 itself full-duplex capable. This is almost certainly one of two things: a full-
 duplex-capable  Ethernet switch (*not* a hub), or a full-duplex-capable NIC on
 another system that's connected directly to the 3c509B via a crossover cable.
-
-Full-duplex mode can be enabled using 'ethtool'.
  
 /////Extremely important caution concerning full-duplex mode/////
 Understand that the 3c509B's hardware's full-duplex support is much more
@@ -115,8 +113,6 @@ This insured that merely upgrading the driver from an earlier version would
 never automatically enable full-duplex mode in an existing installation;
 it must always be explicitly enabled via one of these code in order to be
 activated.
-
-The transceiver type can be changed using 'ethtool'.
   
 
 (4a) Interpretation of error messages and common problems
index b23a092ee64bcde576ac46ed8559b46da845193f..c57d3964e00c99a176279542b3deb37bcfe2ede3 100644 (file)
@@ -1974,12 +1974,6 @@ W:       http://acpi4asus.sf.net
 S:     Maintained
 F:     drivers/platform/x86/eeepc-laptop.c
 
-EFIFB FRAMEBUFFER DRIVER
-L:     linux-fbdev@vger.kernel.org
-M:     Peter Jones <pjones@redhat.com>
-S:     Maintained
-F:     drivers/video/efifb.c
-
 EFS FILESYSTEM
 W:     http://aeschi.ch.eu.org/efs/
 S:     Orphan
index a606202c67351bc0f4fdd237754b4625cad9a269..62c75a6c144ba47c6158401156632b91a30eecee 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 32
-EXTRAVERSION = .27
+EXTRAVERSION = .9
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
index 90934b97acd53bf27fa208065c63027f52710ec4..7f418bbc261a0f825ab6072f935f1f974a2f746c 100644 (file)
@@ -6,6 +6,8 @@ config OPROFILE
        tristate "OProfile system profiling (EXPERIMENTAL)"
        depends on PROFILING
        depends on HAVE_OPROFILE
+       depends on TRACING_SUPPORT
+       select TRACING
        select RING_BUFFER
        select RING_BUFFER_ALLOW_SWAP
        help
index 5c905aaaeccd82861ea62d9186517475f0d7019c..52a79dfc13c6774ec1f88f2e288af1a42f774f51 100644 (file)
@@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc)
 #define IO7__ERR_CYC__CYCLE__M (0x7)
 
        printk("%s        Packet In Error: %s\n"
-              "%s        Error in %s, cycle %lld%s%s\n",
+              "%s        Error in %s, cycle %ld%s%s\n",
               err_print_prefix, 
               packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
               err_print_prefix,
@@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym)
        }
 
        printk("%s      Up Hose Garbage Symptom:\n"
-              "%s        Source Port: %lld - Dest PID: %lld - OpCode: %s\n",
+              "%s        Source Port: %ld - Dest PID: %ld - OpCode: %s\n", 
               err_print_prefix,
               err_print_prefix, 
               EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
@@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
 #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M   (0xfff)
 
        printk("%s      Split Completion Error:\n"      
-              "%s         Source (Bus:Dev:Func): %lld:%lld:%lld\n",
+              "%s         Source (Bus:Dev:Func): %ld:%ld:%ld\n",
               err_print_prefix,
               err_print_prefix,
               EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
index 457038911f57e06f2a9374c087506a58d305db8d..b01836a24c0ffa058a1477b7bd381b3278781fd0 100644 (file)
@@ -909,18 +909,6 @@ config ARM_ERRATA_460075
          ACTLR register. Note that setting specific bits in the ACTLR register
          may not be available in non-secure mode.
 
-config ARM_ERRATA_720789
-       bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
-       depends on CPU_V7 && SMP
-       help
-         This option enables the workaround for the 720789 Cortex-A9 (prior to
-         r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
-         broadcasted CP15 TLB maintenance operations TLBIASIDIS and TLBIMVAIS.
-         As a consequence of this erratum, some TLB entries which should be
-         invalidated are not, resulting in an incoherency in the system page
-         tables. The workaround changes the TLB flushing routines to invalidate
-         entries regardless of the ASID.
-
 endmenu
 
 source "arch/arm/common/Kconfig"
index 65f5a639c2cac4d3e0c420c28e08ebbfd59df692..1cf19a5d9c0b138bf893c5abe4bff0177a7d8ac2 100644 (file)
@@ -162,9 +162,9 @@ not_angel:
 
                .text
                adr     r0, LC0
- ARM(          ldmia   r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
- THUMB(                ldmia   r0, {r1, r2, r3, r4, r5, r6, r11, ip}   )
- THUMB(                ldr     sp, [r0, #32]                           )
+ ARM(          ldmia   r0, {r1, r2, r3, r4, r5, r6, ip, sp}    )
+ THUMB(                ldmia   r0, {r1, r2, r3, r4, r5, r6, ip}        )
+ THUMB(                ldr     sp, [r0, #28]                           )
                subs    r0, r0, r1              @ calculate the delta offset
 
                                                @ if delta is zero, we are
@@ -174,13 +174,12 @@ not_angel:
                /*
                 * We're running at a different address.  We need to fix
                 * up various pointers:
-                *   r5 - zImage base address (_start)
-                *   r6 - size of decompressed image
-                *   r11 - GOT start
+                *   r5 - zImage base address
+                *   r6 - GOT start
                 *   ip - GOT end
                 */
                add     r5, r5, r0
-               add     r11, r11, r0
+               add     r6, r6, r0
                add     ip, ip, r0
 
 #ifndef CONFIG_ZBOOT_ROM
@@ -198,10 +197,10 @@ not_angel:
                /*
                 * Relocate all entries in the GOT table.
                 */
-1:             ldr     r1, [r11, #0]           @ relocate entries in the GOT
+1:             ldr     r1, [r6, #0]            @ relocate entries in the GOT
                add     r1, r1, r0              @ table.  This fixes up the
-               str     r1, [r11], #4           @ C references.
-               cmp     r11, ip
+               str     r1, [r6], #4            @ C references.
+               cmp     r6, ip
                blo     1b
 #else
 
@@ -209,12 +208,12 @@ not_angel:
                 * Relocate entries in the GOT table.  We only relocate
                 * the entries that are outside the (relocated) BSS region.
                 */
-1:             ldr     r1, [r11, #0]           @ relocate entries in the GOT
+1:             ldr     r1, [r6, #0]            @ relocate entries in the GOT
                cmp     r1, r2                  @ entry < bss_start ||
                cmphs   r3, r1                  @ _end < entry
                addlo   r1, r1, r0              @ table.  This fixes up the
-               str     r1, [r11], #4           @ C references.
-               cmp     r11, ip
+               str     r1, [r6], #4            @ C references.
+               cmp     r6, ip
                blo     1b
 #endif
 
@@ -244,7 +243,6 @@ not_relocated:      mov     r0, #0
  * Check to see if we will overwrite ourselves.
  *   r4 = final kernel address
  *   r5 = start of this image
- *   r6 = size of decompressed image
  *   r2 = end of malloc space (and therefore this image)
  * We basically want:
  *   r4 >= r2 -> OK
@@ -252,7 +250,8 @@ not_relocated:      mov     r0, #0
  */
                cmp     r4, r2
                bhs     wont_overwrite
-               add     r0, r4, r6
+               sub     r3, sp, r5              @ > compressed kernel size
+               add     r0, r4, r3, lsl #2      @ allow for 4x expansion
                cmp     r0, r5
                bls     wont_overwrite
 
@@ -268,6 +267,7 @@ not_relocated:      mov     r0, #0
  * r1-r3  = unused
  * r4     = kernel execution address
  * r5     = decompressed kernel start
+ * r6     = processor ID
  * r7     = architecture ID
  * r8     = atags pointer
  * r9-r12,r14 = corrupted
@@ -308,8 +308,7 @@ LC0:                .word   LC0                     @ r1
                .word   _end                    @ r3
                .word   zreladdr                @ r4
                .word   _start                  @ r5
-               .word   _image_size             @ r6
-               .word   _got_start              @ r11
+               .word   _got_start              @ r6
                .word   _got_end                @ ip
                .word   user_stack+4096         @ sp
 LC1:           .word   reloc_end - reloc_start
@@ -333,6 +332,7 @@ params:             ldr     r0, =params_phys
  *
  * On entry,
  *  r4 = kernel execution address
+ *  r6 = processor ID
  *  r7 = architecture number
  *  r8 = atags pointer
  *  r9 = run-time address of "start"  (???)
@@ -538,6 +538,7 @@ __common_mmu_cache_on:
  * r1-r3  = unused
  * r4     = kernel execution address
  * r5     = decompressed kernel start
+ * r6     = processor ID
  * r7     = architecture ID
  * r8     = atags pointer
  * r9-r12,r14 = corrupted
@@ -576,19 +577,19 @@ call_kernel:      bl      cache_clean_flush
  *  r1  = corrupted
  *  r2  = corrupted
  *  r3  = block offset
- *  r9  = corrupted
+ *  r6  = corrupted
  *  r12 = corrupted
  */
 
 call_cache_fn: adr     r12, proc_types
 #ifdef CONFIG_CPU_CP15
-               mrc     p15, 0, r9, c0, c0      @ get processor ID
+               mrc     p15, 0, r6, c0, c0      @ get processor ID
 #else
-               ldr     r9, =CONFIG_PROCESSOR_ID
+               ldr     r6, =CONFIG_PROCESSOR_ID
 #endif
 1:             ldr     r1, [r12, #0]           @ get value
                ldr     r2, [r12, #4]           @ get mask
-               eor     r1, r1, r9              @ (real ^ match)
+               eor     r1, r1, r6              @ (real ^ match)
                tst     r1, r2                  @       & mask
  ARM(          addeq   pc, r12, r3             ) @ call cache function
  THUMB(                addeq   r12, r3                 )
@@ -770,7 +771,8 @@ proc_types:
  * Turn off the Cache and MMU.  ARMv3 does not support
  * reading the control register, but ARMv4 does.
  *
- * On exit, r0, r1, r2, r3, r9, r12 corrupted
+ * On entry,  r6 = processor ID
+ * On exit,   r0, r1, r2, r3, r12 corrupted
  * This routine must preserve: r4, r6, r7
  */
                .align  5
@@ -843,8 +845,10 @@ __armv3_mmu_cache_off:
 /*
  * Clean and flush the cache to maintain consistency.
  *
+ * On entry,
+ *  r6 = processor ID
  * On exit,
- *  r1, r2, r3, r9, r11, r12 corrupted
+ *  r1, r2, r3, r11, r12 corrupted
  * This routine must preserve:
  *  r0, r4, r5, r6, r7
  */
@@ -956,7 +960,7 @@ __armv4_mmu_cache_flush:
                mov     r2, #64*1024            @ default: 32K dcache size (*2)
                mov     r11, #32                @ default: 32 byte line size
                mrc     p15, 0, r3, c0, c0, 1   @ read cache type
-               teq     r3, r9                  @ cache ID register present?
+               teq     r3, r6                  @ cache ID register present?
                beq     no_cache_id
                mov     r1, r3, lsr #18
                and     r1, r1, #7
index cbed030b55cf5057591bc617efdb21b9ecd80222..a5924b9b88bdbad7a2daea6489faf712152174b8 100644 (file)
@@ -36,9 +36,6 @@ SECTIONS
 
   _etext = .;
 
-  /* Assume size of decompressed image is 4x the compressed image */
-  _image_size = (_etext - _text) * 4;
-
   _got_start = .;
   .got                 : { *(.got) }
   _got_end = .;
index b07bfee26e320ead490fa80a14540d22a04db24e..8ba7044c554dc2d35b19bd2dbd8a98d6b3206162 100644 (file)
@@ -887,6 +887,8 @@ static int sa1111_resume(struct platform_device *dev)
        if (!save)
                return 0;
 
+       spin_lock_irqsave(&sachip->lock, flags);
+
        /*
         * Ensure that the SA1111 is still here.
         * FIXME: shouldn't do this here.
@@ -903,13 +905,6 @@ static int sa1111_resume(struct platform_device *dev)
         * First of all, wake up the chip.
         */
        sa1111_wake(sachip);
-
-       /*
-        * Only lock for write ops. Also, sa1111_wake must be called with
-        * released spinlock!
-        */
-       spin_lock_irqsave(&sachip->lock, flags);
-
        sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
        sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
 
index eea494790b56cfaf9e398e29780f6d4d3f84d0f0..00f46d9ce299310360ed89a8c19e2521db58e118 100644 (file)
        @ Slightly optimised to avoid incrementing the pointer twice
        usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
        .if     \rept == 2
-       usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
+       usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
        .endif
 
        add\cond \ptr, #\rept * \inc
index 1df645713d483fedd29e10bbd8cbc79678edb392..bbecccda76d08560e6ef0200444fc3b3a4484118 100644 (file)
@@ -150,24 +150,15 @@ struct pt_regs {
  */
 static inline int valid_user_regs(struct pt_regs *regs)
 {
-       unsigned long mode = regs->ARM_cpsr & MODE_MASK;
-
-       /*
-        * Always clear the F (FIQ) and A (delayed abort) bits
-        */
-       regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
-
-       if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
-               if (mode == USR_MODE)
-                       return 1;
-               if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
-                       return 1;
+       if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) {
+               regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
+               return 1;
        }
 
        /*
         * Force CPSR to something logical...
         */
-       regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
+       regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
        if (!(elf_hwcap & HWCAP_26BIT))
                regs->ARM_cpsr |= USR_MODE;
 
index 00c1cba729cb3053d46908771fb8552d2442e830..c2f1605de35902df4378b1e77898cc796441b4db 100644 (file)
@@ -369,11 +369,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
        if (tlb_flag(TLB_V6_I_ASID))
                asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
        if (tlb_flag(TLB_V7_UIS_ASID))
-#ifdef CONFIG_ARM_ERRATA_720789
-               asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
-#else
                asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
-#endif
 
        if (tlb_flag(TLB_BTB)) {
                /* flush the branch target cache */
@@ -413,11 +409,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
        if (tlb_flag(TLB_V6_I_PAGE))
                asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
        if (tlb_flag(TLB_V7_UIS_PAGE))
-#ifdef CONFIG_ARM_ERRATA_720789
-               asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc");
-#else
                asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
-#endif
 
        if (tlb_flag(TLB_BTB)) {
                /* flush the branch target cache */
index 8e7ad911b579763002590d3fbc38dd4a7c77d8bb..f8992cb3089738aafae646c020083a770af85030 100644 (file)
@@ -383,13 +383,11 @@ ENDPROC(sys_clone_wrapper)
 
 sys_sigreturn_wrapper:
                add     r0, sp, #S_OFF
-               mov     why, #0         @ prevent syscall restart handling
                b       sys_sigreturn
 ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
                add     r0, sp, #S_OFF
-               mov     why, #0         @ prevent syscall restart handling
                b       sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
 
index 8bccbfa693ffc359dc55d6004837d2a149e2c5cd..da1f94906a4e2a35881bde99510760180bdba9e2 100644 (file)
@@ -583,14 +583,13 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
 {
        insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
-       long ppc = (long)p->addr + 8;
        union reg_pair fnr;
        int rd = (insn >> 12) & 0xf;
        int rn = (insn >> 16) & 0xf;
        int rm = insn & 0xf;
        long rdv;
-       long rnv = (rn == 15) ? ppc : regs->uregs[rn];
-       long rmv = (rm == 15) ? ppc : regs->uregs[rm];
+       long rnv  = regs->uregs[rn];
+       long rmv  = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
        long cpsr = regs->ARM_cpsr;
 
        fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
index 64f6bc1a91326c79800bbb9205645a7d0e7e5e0c..1e4cbd4e7be930f2515bece2a19c23f2f2a07845 100644 (file)
@@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
  */
 .L_found:
 #if __LINUX_ARM_ARCH__ >= 5
-               rsb     r0, r3, #0
-               and     r3, r3, r0
+               rsb     r1, r3, #0
+               and     r3, r3, r1
                clz     r3, r3
                rsb     r3, r3, #31
                add     r0, r2, r3
@@ -190,7 +190,5 @@ ENDPROC(_find_next_bit_be)
                addeq   r2, r2, #1
                mov     r0, r2
 #endif
-               cmp     r1, r0                  @ Clamp to maxbit
-               movlo   r0, r1
                mov     pc, lr
 
index 2f7e4980a1d8b8e4f49d5a925deb97bc18d320a7..332b784050b272592d402afddf077b0ec591d00b 100644 (file)
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
                .end    = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
                .flags  = IORESOURCE_MEM,
        },
-       [1] = {
+       [2] = {
                .start  = AT91SAM9G45_ID_DMA,
                .end    = AT91SAM9G45_ID_DMA,
                .flags  = IORESOURCE_IRQ,
index 6879cfec2c7b8cd28fe8d825d87aab3112cd1ad9..e34d96a825e3a60259a48cbd6dd92a3f79499881 100644 (file)
 #define SYSTEM_REV_S_USES_VAUX3 0x8
 
 static int board_keymap[] = {
-       /*
-        * Note that KEY(x, 8, KEY_XXX) entries represent "entrire row
-        * connected to the ground" matrix state.
-        */
        KEY(0, 0, KEY_Q),
        KEY(0, 1, KEY_O),
        KEY(0, 2, KEY_P),
@@ -48,7 +44,6 @@ static int board_keymap[] = {
        KEY(0, 4, KEY_BACKSPACE),
        KEY(0, 6, KEY_A),
        KEY(0, 7, KEY_S),
-
        KEY(1, 0, KEY_W),
        KEY(1, 1, KEY_D),
        KEY(1, 2, KEY_F),
@@ -57,7 +52,6 @@ static int board_keymap[] = {
        KEY(1, 5, KEY_J),
        KEY(1, 6, KEY_K),
        KEY(1, 7, KEY_L),
-
        KEY(2, 0, KEY_E),
        KEY(2, 1, KEY_DOT),
        KEY(2, 2, KEY_UP),
@@ -65,8 +59,6 @@ static int board_keymap[] = {
        KEY(2, 5, KEY_Z),
        KEY(2, 6, KEY_X),
        KEY(2, 7, KEY_C),
-       KEY(2, 8, KEY_F9),
-
        KEY(3, 0, KEY_R),
        KEY(3, 1, KEY_V),
        KEY(3, 2, KEY_B),
@@ -75,23 +67,20 @@ static int board_keymap[] = {
        KEY(3, 5, KEY_SPACE),
        KEY(3, 6, KEY_SPACE),
        KEY(3, 7, KEY_LEFT),
-
        KEY(4, 0, KEY_T),
        KEY(4, 1, KEY_DOWN),
        KEY(4, 2, KEY_RIGHT),
        KEY(4, 4, KEY_LEFTCTRL),
        KEY(4, 5, KEY_RIGHTALT),
        KEY(4, 6, KEY_LEFTSHIFT),
-       KEY(4, 8, KEY_F10),
-
        KEY(5, 0, KEY_Y),
-       KEY(5, 8, KEY_F11),
-
        KEY(6, 0, KEY_U),
-
        KEY(7, 0, KEY_I),
        KEY(7, 1, KEY_F7),
        KEY(7, 2, KEY_F8),
+       KEY(0xff, 2, KEY_F9),
+       KEY(0xff, 4, KEY_F10),
+       KEY(0xff, 5, KEY_F11),
 };
 
 static struct matrix_keymap_data board_map_data = {
index 5f2ba8d9015cfce31a02f901e7f41742dd1e1fa9..811743c5614769d5fb283201fe77e5c606633d98 100644 (file)
@@ -2,7 +2,6 @@
 #define _COLIBRI_H_
 
 #include <net/ax88796.h>
-#include <mach/mfp.h>
 
 /*
  * common settings for all modules
index 6727c783e0d6f70ab1265eb7674bf3a11309a806..c48e1f2c3349492472d02316fb77f0cb7f29c8ed 100644 (file)
@@ -18,7 +18,6 @@ config REALVIEW_EB_ARM11MP
        bool "Support ARM11MPCore tile"
        depends on MACH_REALVIEW_EB
        select CPU_V6
-       select ARCH_HAS_BARRIERS if SMP
        help
          Enable support for the ARM11MPCore tile on the Realview platform.
 
@@ -36,7 +35,6 @@ config MACH_REALVIEW_PB11MP
        select CPU_V6
        select ARM_GIC
        select HAVE_PATA_PLATFORM
-       select ARCH_HAS_BARRIERS if SMP
        help
          Include support for the ARM(R) RealView MPCore Platform Baseboard.
          PB11MPCore is a platform with an on-board ARM11MPCore and has
diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h
deleted file mode 100644 (file)
index 0c5d749..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
- * Barriers redefined for RealView ARM11MPCore platforms with L220 cache
- * controller to work around hardware errata causing the outer_sync()
- * operation to deadlock the system.
- */
-#define mb()           dsb()
-#define rmb()          dmb()
-#define wmb()          mb()
index dd9598b5e5271dbdc2614631f5c73835e9363afe..70997d5bee2d76d1d617a5ce5453437e51e4c009 100644 (file)
@@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
 {
        asm("\
        stmfd   sp!, {r4-r9, lr}                \n\
-       mov     ip, %2                          \n\
+       mov     ip, %0                          \n\
 1:     mov     lr, r1                          \n\
        ldmia   r1!, {r2 - r9}                  \n\
        pld     [lr, #32]                       \n\
@@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB\n\
        ldmfd   sp!, {r4-r9, pc}"
        :
-       : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
+       : "I" (PAGE_SIZE));
 }
 
 void feroceon_copy_user_highpage(struct page *to, struct page *from,
index 7bc0ac71b371270d38a2aec389388fd473b82dce..9ab098414227e928007212f6159d8fcba88fc763 100644 (file)
@@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
 {
        asm("\
        stmfd   sp!, {r4, lr}                   @ 2\n\
-       mov     r2, %2                          @ 1\n\
+       mov     r2, %0                          @ 1\n\
        ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
 1:     mcr     p15, 0, r0, c7, c6, 1           @ 1   invalidate D line\n\
        stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
@@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
        mcr     p15, 0, r1, c7, c10, 4          @ 1   drain WB\n\
        ldmfd    sp!, {r4, pc}                  @ 3"
        :
-       : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+       : "I" (PAGE_SIZE / 64));
 }
 
 void v4wb_copy_user_highpage(struct page *to, struct page *from,
index 35bf60992a14ff890a8ce1e8d38936e2af041140..300efafd66431ae02b042df0e065efca88424bf3 100644 (file)
@@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
 {
        asm("\
        stmfd   sp!, {r4, lr}                   @ 2\n\
-       mov     r2, %2                          @ 1\n\
+       mov     r2, %0                          @ 1\n\
        ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
 1:     stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
        ldmia   r1!, {r3, r4, ip, lr}           @ 4+1\n\
@@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
        mcr     p15, 0, r2, c7, c7, 0           @ flush ID cache\n\
        ldmfd   sp!, {r4, pc}                   @ 3"
        :
-       : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+       : "I" (PAGE_SIZE / 64));
 }
 
 void v4wt_copy_user_highpage(struct page *to, struct page *from,
index 27dc3633d4dfebd0d2fab31004f4f5e916423ab2..bc4525f5ab2326d266c828dac8c21f81a7655854 100644 (file)
@@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
        asm("\
        stmfd   sp!, {r4, r5, lr}               \n\
-       mov     lr, %2                          \n\
+       mov     lr, %0                          \n\
                                                \n\
        pld     [r1, #0]                        \n\
        pld     [r1, #32]                       \n\
@@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
                                                \n\
        ldmfd   sp!, {r4, r5, pc}"
        :
-       : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
+       : "I" (PAGE_SIZE / 64 - 1));
 }
 
 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
index 3191cd6593477bd53fbf9b3031de39fe54b7f63f..10e06801afb38e9d0a60cdb8b8cd94d43bc9e139 100644 (file)
@@ -386,9 +386,6 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
        if (addr < TASK_SIZE)
                return do_page_fault(addr, fsr, regs);
 
-       if (user_mode(regs))
-               goto bad_area;
-
        index = pgd_index(addr);
 
        /*
index c47aa88cc83e2515fe7f0280ef45326850716e9b..cfc4a8b43e6a613f589c6d210d4226dc0721f5d3 100644 (file)
@@ -223,16 +223,13 @@ static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
        struct mxc_gpio_port *port =
                container_of(chip, struct mxc_gpio_port, chip);
        u32 l;
-       unsigned long flags;
 
-       spin_lock_irqsave(&port->lock, flags);
        l = __raw_readl(port->base + GPIO_GDIR);
        if (dir)
                l |= 1 << offset;
        else
                l &= ~(1 << offset);
        __raw_writel(l, port->base + GPIO_GDIR);
-       spin_unlock_irqrestore(&port->lock, flags);
 }
 
 static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -241,12 +238,9 @@ static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
                container_of(chip, struct mxc_gpio_port, chip);
        void __iomem *reg = port->base + GPIO_DR;
        u32 l;
-       unsigned long flags;
 
-       spin_lock_irqsave(&port->lock, flags);
        l = (__raw_readl(reg) & (~(1 << offset))) | (value << offset);
        __raw_writel(l, reg);
-       spin_unlock_irqrestore(&port->lock, flags);
 }
 
 static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -300,8 +294,6 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
                port[i].chip.base = i * 32;
                port[i].chip.ngpio = 32;
 
-               spin_lock_init(&port[i].lock);
-
                /* its a serious configuration bug when it fails */
                BUG_ON( gpiochip_add(&port[i].chip) < 0 );
 
index 7a0dc5aa2479c4d46794279ebf54d425e2f0fc30..894d2f87c85600c495d117a6d0f524060ee15aaa 100644 (file)
@@ -19,7 +19,6 @@
 #ifndef __ASM_ARCH_MXC_GPIO_H__
 #define __ASM_ARCH_MXC_GPIO_H__
 
-#include <linux/spinlock.h>
 #include <mach/hardware.h>
 #include <asm-generic/gpio.h>
 
@@ -37,7 +36,6 @@ struct mxc_gpio_port {
        int virtual_irq_start;
        struct gpio_chip chip;
        u32 both_edges;
-       spinlock_t lock;
 };
 
 int mxc_gpio_init(struct mxc_gpio_port*, int);
index d66cead97d28b3c5d63d9c22d4fdcb77b7d7bd8e..66dc2d03b7fc70c1130bd7b0a751dbeed6d53b17 100644 (file)
@@ -277,7 +277,7 @@ ENTRY(vfp_put_double)
 #ifdef CONFIG_VFPv3
        @ d16 - d31 registers
        .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-1:     mcrr    p11, 3, r0, r1, c\dr    @ fmdrr r0, r1, d\dr
+1:     mcrr    p11, 3, r1, r2, c\dr    @ fmdrr r1, r2, d\dr
        mov     pc, lr
        .org    1b + 8
        .endr
index 93f6c634fdf4cbfdb5399e6327baef82096cf390..8542bc31f63cae7445482a4a46b41d797f06acfd 100644 (file)
@@ -15,8 +15,6 @@
 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES        L1_CACHE_BYTES
 
-#define ARCH_KMALLOC_MINALIGN  L1_CACHE_BYTES
-
 #ifdef CONFIG_SMP
 #define __cacheline_aligned
 #else
index 7dc0f0f85b7c56f7dc9622242d9e92fd39930415..2797163b8f4f12cb543495664610de65376b1319 100644 (file)
@@ -17,8 +17,6 @@
 #define L1_CACHE_SHIFT         (CONFIG_FRV_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
 
-#define ARCH_KMALLOC_MINALIGN  L1_CACHE_BYTES
-
 #define __cacheline_aligned    __attribute__((aligned(L1_CACHE_BYTES)))
 #define ____cacheline_aligned  __attribute__((aligned(L1_CACHE_BYTES)))
 
index 01ae69be074a6794abc65b29155aa80fdebdc274..674a8374c6d9ec989616827a20cd1895dd5fbcbb 100644 (file)
@@ -677,19 +677,12 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
                        spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
                        pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
-                       if (unlikely(pide >= (ioc->res_size << 3))) {
-                               printk(KERN_WARNING "%s: I/O MMU @ %p is"
-                                      "out of mapping resources, %u %u %lx\n",
-                                      __func__, ioc->ioc_hpa, ioc->res_size,
-                                      pages_needed, dma_get_seg_boundary(dev));
-                               return -1;
-                       }
+                       if (unlikely(pide >= (ioc->res_size << 3)))
+                               panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
+                                     ioc->ioc_hpa);
 #else
-                       printk(KERN_WARNING "%s: I/O MMU @ %p is"
-                              "out of mapping resources, %u %u %lx\n",
-                              __func__, ioc->ioc_hpa, ioc->res_size,
-                              pages_needed, dma_get_seg_boundary(dev));
-                       return -1;
+                       panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
+                             ioc->ioc_hpa);
 #endif
                }
        }
@@ -972,8 +965,6 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 #endif
 
        pide = sba_alloc_range(ioc, dev, size);
-       if (pide < 0)
-               return 0;
 
        iovp = (dma_addr_t) pide << iovp_shift;
 
@@ -1329,7 +1320,6 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
        unsigned long dma_offset, dma_len; /* start/len of DMA stream */
        int n_mappings = 0;
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
-       int idx;
 
        while (nents > 0) {
                unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1428,22 +1418,16 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
                vcontig_sg->dma_length = vcontig_len;
                dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
                ASSERT(dma_len <= DMA_CHUNK_SIZE);
-               idx = sba_alloc_range(ioc, dev, dma_len);
-               if (idx < 0) {
-                       dma_sg->dma_length = 0;
-                       return -1;
-               }
-               dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
-                                                  | dma_offset);
+               dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
+                       | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
+                       | dma_offset);
                n_mappings++;
        }
 
        return n_mappings;
 }
 
-static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-                              int nents, enum dma_data_direction dir,
-                              struct dma_attrs *attrs);
+
 /**
  * sba_map_sg - map Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
@@ -1509,10 +1493,6 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
        ** Access to the virtual address is what forces a two pass algorithm.
        */
        coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
-       if (coalesced < 0) {
-               sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-               return 0;
-       }
 
        /*
        ** Program the I/O Pdir
index 8a20b58ba34f9f7f5e47d88a1d9049ef5a637a15..91df9686a0da88ac8b299ddf81c0ca697d3f075d 100644 (file)
@@ -94,7 +94,6 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define acpi_noirq 0   /* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1  /* no ACPI spec workarounds on IA64 */
-#define acpi_ht 0      /* no HT-only mode on IA64 */
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
index c8662cd40fdc97aa070a6588db18d98ea135286e..dfcf75b8426d8bde81dd8b7fda51ad5f12df137c 100644 (file)
@@ -198,7 +198,7 @@ ptr_to_compat(void __user *uptr)
 }
 
 static __inline__ void __user *
-arch_compat_alloc_user_space (long len)
+compat_alloc_user_space (long len)
 {
        struct pt_regs *regs = task_pt_regs(current);
        return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
index 331d42bda77ae97f457b13f970aa83c40e11d4b0..3567d54f8cee7533ecba41847c5f9957d9481296 100644 (file)
@@ -420,31 +420,22 @@ EX(.fail_efault, ld8 r14=[r33])                   // r14 <- *set
        ;;
 
        RSM_PSR_I(p0, r18, r19)                 // mask interrupt delivery
+       mov ar.ccv=0
        andcm r14=r14,r17                       // filter out SIGKILL & SIGSTOP
-       mov r8=EINVAL                   // default to EINVAL
 
 #ifdef CONFIG_SMP
-       // __ticket_spin_trylock(r31)
-       ld4 r17=[r31]
-       ;;
-       mov.m ar.ccv=r17
-       extr.u r9=r17,17,15
-       adds r19=1,r17
-       extr.u r18=r17,0,15
-       ;;
-       cmp.eq p6,p7=r9,r18
+       mov r17=1
        ;;
-(p6)   cmpxchg4.acq r9=[r31],r19,ar.ccv
-(p6)   dep.z r20=r19,1,15              // next serving ticket for unlock
-(p7)   br.cond.spnt.many .lock_contention
+       cmpxchg4.acq r18=[r31],r17,ar.ccv       // try to acquire the lock
+       mov r8=EINVAL                   // default to EINVAL
        ;;
-       cmp4.eq p0,p7=r9,r17
-       adds r31=2,r31
-(p7)   br.cond.spnt.many .lock_contention
        ld8 r3=[r2]                     // re-read current->blocked now that we hold the lock
+       cmp4.ne p6,p0=r18,r0
+(p6)   br.cond.spnt.many .lock_contention
        ;;
 #else
        ld8 r3=[r2]                     // re-read current->blocked now that we hold the lock
+       mov r8=EINVAL                   // default to EINVAL
 #endif
        add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
        add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -499,9 +490,7 @@ EX(.fail_efault, ld8 r14=[r33])                     // r14 <- *set
 (p6)   br.cond.spnt.few 1b                     // yes -> retry
 
 #ifdef CONFIG_SMP
-       // __ticket_spin_unlock(r31)
-       st2.rel [r31]=r20
-       mov r20=0                                       // i must not leak kernel bits...
+       st4.rel [r31]=r0                        // release the lock
 #endif
        SSM_PSR_I(p0, p9, r31)
        ;;
@@ -523,8 +512,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
 
 .sig_pending:
 #ifdef CONFIG_SMP
-       // __ticket_spin_unlock(r31)
-       st2.rel [r31]=r20                       // release the lock
+       st4.rel [r31]=r0                        // release the lock
 #endif
        SSM_PSR_I(p0, p9, r17)
        ;;
index 4a746ea838ff37b423f92f31a8eecc8207f6217a..6c89228560493df8d17d6e0f7d1b65d28ee543ad 100644 (file)
@@ -25,7 +25,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq,
        if (irq_prepare_move(irq, cpu))
                return -1;
 
-       get_cached_msi_msg(irq, &msg);
+       read_msi_msg(irq, &msg);
 
        addr = msg.address_lo;
        addr &= MSI_ADDR_DEST_ID_MASK;
index a35c661e5e89a544b097f731af1e65486c3077c2..4990495d753189933bebf64f18d1f52303b6e6cd 100644 (file)
@@ -473,7 +473,7 @@ void update_vsyscall_tz(void)
 {
 }
 
-void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
+void update_vsyscall(struct timespec *wall, struct clocksource *c)
 {
         unsigned long flags;
 
@@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
 
         /* copy fsyscall clock data */
         fsyscall_gtod_data.clk_mask = c->mask;
-        fsyscall_gtod_data.clk_mult = mult;
+        fsyscall_gtod_data.clk_mult = c->mult;
         fsyscall_gtod_data.clk_shift = c->shift;
         fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
         fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
index 2eb636506496dd98fa61406c1ece4e3e33b306f5..0ad09f05efa97698e28b4ed2eeeccbbc7ac9f878 100644 (file)
@@ -1797,8 +1797,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
        struct kvm_memory_slot *memslot;
        int r, i;
-       long base;
-       unsigned long n;
+       long n, base;
        unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
                        offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1811,7 +1810,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
        if (!memslot->dirty_bitmap)
                goto out;
 
-       n = kvm_dirty_bitmap_bytes(memslot);
+       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
        base = memslot->base_gfn / BITS_PER_LONG;
 
        for (i = 0; i < n/sizeof(long); ++i) {
@@ -1827,7 +1826,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                struct kvm_dirty_log *log)
 {
        int r;
-       unsigned long n;
+       int n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;
 
@@ -1845,7 +1844,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (is_dirty) {
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots[log->slot];
-               n = kvm_dirty_bitmap_bytes(memslot);
+               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
index e2cde52116f142d7729a24b3d16e74e3ca9662d4..ee09d261f2e6c0be4071e1de30aee0339d6b9706 100644 (file)
@@ -120,7 +120,7 @@ static inline void down_spin(struct spinaphore *ss)
        ia64_invala();
 
        for (;;) {
-               asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
+               asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
                if (time_before(t, serve))
                        return;
                cpu_relax();
index 9ab2617e46ec1a292988d412a60aa2812fcb9cc7..fbbfb970120128a29df68c549895187ea3e3013e 100644 (file)
@@ -174,7 +174,7 @@ static int sn_set_msi_irq_affinity(unsigned int irq,
         * Release XIO resources for the old MSI PCI address
         */
 
-       get_cached_msi_msg(irq, &msg);
+       read_msi_msg(irq, &msg);
         sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
        pdev = sn_pdev->pdi_linux_pcidev;
        provider = SN_PCIDEV_BUSPROVIDER(pdev);
index ecafbe1718c3cccee4a0441efaca9670b0548c59..fed3fd30de7e468797a85ff5945119c436f92fce 100644 (file)
@@ -8,6 +8,4 @@
 #define        L1_CACHE_SHIFT  4
 #define        L1_CACHE_BYTES  (1<< L1_CACHE_SHIFT)
 
-#define ARCH_KMALLOC_MINALIGN  L1_CACHE_BYTES
-
 #endif
index f76c8581d747f90909f8f208c6054f0ae5589064..34187354304a6217d16ff3dab5315e605409ab05 100644 (file)
@@ -69,16 +69,12 @@ export MMU DTB
 
 all: linux.bin
 
-# With make 3.82 we cannot mix normal and wildcard targets
-BOOT_TARGETS1 = linux.bin linux.bin.gz
-BOOT_TARGETS2 = simpleImage.%
+BOOT_TARGETS = linux.bin linux.bin.gz simpleImage.%
 
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
 
-$(BOOT_TARGETS1): vmlinux
-       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-$(BOOT_TARGETS2): vmlinux
+$(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 define archhelp
index 09e7128ec0f0d31afdb3d4574bf46a7032d3d85f..dd75d673447e37147c438991829b70c15c7c721b 100644 (file)
@@ -434,7 +434,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_add          \n"
-               "       daddu   %0, %2                                  \n"
+               "       addu    %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
@@ -446,7 +446,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_add          \n"
-               "       daddu   %0, %2                                  \n"
+               "       addu    %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqz    %0, 2f                                  \n"
                "       .subsection 2                                   \n"
@@ -479,7 +479,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_sub          \n"
-               "       dsubu   %0, %2                                  \n"
+               "       subu    %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
@@ -491,7 +491,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_sub          \n"
-               "       dsubu   %0, %2                                  \n"
+               "       subu    %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqz    %0, 2f                                  \n"
                "       .subsection 2                                   \n"
@@ -524,10 +524,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
-               "       daddu   %0, %1, %3                              \n"
+               "       addu    %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
-               "       daddu   %0, %1, %3                              \n"
+               "       addu    %0, %1, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -538,10 +538,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
-               "       daddu   %0, %1, %3                              \n"
+               "       addu    %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 2f                                  \n"
-               "       daddu   %0, %1, %3                              \n"
+               "       addu    %0, %1, %3                              \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
@@ -576,10 +576,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
-               "       dsubu   %0, %1, %3                              \n"
+               "       subu    %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
-               "       dsubu   %0, %1, %3                              \n"
+               "       subu    %0, %1, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -590,10 +590,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
-               "       dsubu   %0, %1, %3                              \n"
+               "       subu    %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 2f                                  \n"
-               "       dsubu   %0, %1, %3                              \n"
+               "       subu    %0, %1, %3                              \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
index 27505bdc386e72b41727c05283f159c2ed1a2623..f58aed354bfd3a50095e30cbb879045b57f85410 100644 (file)
@@ -144,7 +144,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = (struct pt_regs *)
                ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
index 743385d7b5f22b36bcea1f0aa740ad923af41ca4..7950ef4f032c218a2382be9560c2138b2c05d8a9 100644 (file)
 #if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
     defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
 
-#ifndef __ASSEMBLY__
-extern int sb1250_m3_workaround_needed(void);
-#endif
-
-#define BCM1250_M3_WAR sb1250_m3_workaround_needed()
+#define BCM1250_M3_WAR 1
 #define SIBYTE_1956_WAR        1
 
 #else
index 608dc976455b19ab76c10bc393873eea56c2fb8d..a581d60cbcc21e395a1e7a9ba772c4b0be5dcb4b 100644 (file)
 #define FPU_CSR_COND6   0x40000000      /* $fcc6 */
 #define FPU_CSR_COND7   0x80000000      /* $fcc7 */
 
-/*
- * Bits 18 - 20 of the FPU Status Register will be read as 0,
- * and should be written as zero.
- */
-#define FPU_CSR_RSVD   0x001c0000
-
 /*
  * X the exception cause indicator
  * E the exception enable
 #define FPU_CSR_UDF_S   0x00000008
 #define FPU_CSR_INE_S   0x00000004
 
-/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
-#define FPU_CSR_RM     0x00000003
+/* rounding mode */
 #define FPU_CSR_RN      0x0     /* nearest */
 #define FPU_CSR_RZ      0x1     /* towards zero */
 #define FPU_CSR_RU      0x2     /* towards +Infinity */
index c15d94b2f3a08f928b3639cbe496c366786accdc..454b53924490c58ed383f047f84f68e8be117afe 100644 (file)
@@ -75,9 +75,6 @@ struct mips_fpu_emulator_stats fpuemustats;
 #define FPCREG_RID     0       /* $0  = revision id */
 #define FPCREG_CSR     31      /* $31 = csr */
 
-/* Determine rounding mode from the RM bits of the FCSR */
-#define modeindex(v) ((v) & FPU_CSR_RM)
-
 /* Convert Mips rounding mode (0..3) to IEEE library modes. */
 static const unsigned char ieee_rm[4] = {
        [FPU_CSR_RN] = IEEE754_RN,
@@ -384,14 +381,10 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
                                        (void *) (xcp->cp0_epc),
                                        MIPSInst_RT(ir), value);
 #endif
-
-                               /*
-                                * Don't write reserved bits,
-                                * and convert to ieee library modes
-                                */
-                               ctx->fcr31 = (value &
-                                               ~(FPU_CSR_RSVD | FPU_CSR_RM)) |
-                                               ieee_rm[modeindex(value)];
+                               value &= (FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
+                               ctx->fcr31 &= ~(FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
+                               /* convert to ieee library modes */
+                               ctx->fcr31 |= (value & ~0x3) | ieee_rm[value & 0x3];
                        }
                        if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
                                return SIGFPE;
index 266c0036323a16d7ea768ddbb1af0bd0ff098042..bb1719a55d227dc595467aafd88173dc9507b937 100644 (file)
@@ -73,6 +73,9 @@ static int __cpuinit m4kc_tlbp_war(void)
 enum label_id {
        label_second_part = 1,
        label_leave,
+#ifdef MODULE_START
+       label_module_alloc,
+#endif
        label_vmalloc,
        label_vmalloc_done,
        label_tlbw_hazard,
@@ -89,6 +92,9 @@ enum label_id {
 
 UASM_L_LA(_second_part)
 UASM_L_LA(_leave)
+#ifdef MODULE_START
+UASM_L_LA(_module_alloc)
+#endif
 UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
 UASM_L_LA(_tlbw_hazard)
@@ -725,15 +731,10 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
         * create the plain linear handler
         */
        if (bcm1250_m3_war()) {
-               unsigned int segbits = 44;
-
-               uasm_i_dmfc0(&p, K0, C0_BADVADDR);
-               uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
+               UASM_i_MFC0(&p, K0, C0_BADVADDR);
+               UASM_i_MFC0(&p, K1, C0_ENTRYHI);
                uasm_i_xor(&p, K0, K0, K1);
-               uasm_i_dsrl32(&p, K1, K0, 62 - 32);
-               uasm_i_dsrl(&p, K0, K0, 12 + 1);
-               uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
-               uasm_i_or(&p, K0, K0, K1);
+               UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
                uasm_il_bnez(&p, &r, K0, label_leave);
                /* No need for uasm_i_nop */
        }
@@ -801,6 +802,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
        } else {
 #if defined(CONFIG_HUGETLB_PAGE)
                const enum label_id ls = label_tlb_huge_update;
+#elif defined(MODULE_START)
+               const enum label_id ls = label_module_alloc;
 #else
                const enum label_id ls = label_vmalloc;
 #endif
@@ -1247,15 +1250,10 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
        memset(relocs, 0, sizeof(relocs));
 
        if (bcm1250_m3_war()) {
-               unsigned int segbits = 44;
-
-               uasm_i_dmfc0(&p, K0, C0_BADVADDR);
-               uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
+               UASM_i_MFC0(&p, K0, C0_BADVADDR);
+               UASM_i_MFC0(&p, K1, C0_ENTRYHI);
                uasm_i_xor(&p, K0, K0, K1);
-               uasm_i_dsrl32(&p, K1, K0, 62 - 32);
-               uasm_i_dsrl(&p, K0, K0, 12 + 1);
-               uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
-               uasm_i_or(&p, K0, K0, K1);
+               UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
                uasm_il_bnez(&p, &r, K0, label_leave);
                /* No need for uasm_i_nop */
        }
index e1bd527377edc1860508da46c6a6d02753135d25..f467199676a8526c67aff9464efd98d5a7c9b68c 100644 (file)
@@ -62,7 +62,7 @@ enum opcode {
        insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
        insn_dsrl32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr,
        insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
-       insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
+       insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
        insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
        insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori
 };
@@ -116,7 +116,6 @@ static struct insn insn_table[] __cpuinitdata = {
        { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
        { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
-       { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
        { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
        { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
@@ -363,7 +362,6 @@ I_u2s3u1(_lw)
 I_u1u2u3(_mfc0)
 I_u1u2u3(_mtc0)
 I_u2u1u3(_ori)
-I_u3u1u2(_or)
 I_u2s3u1(_pref)
 I_0(_rfe)
 I_u2s3u1(_sc)
index 5198ae5f64ee0c16ea16f9d042e6442127c54b47..c6d1e3dd82d490df1bb174da54756110c4ceb4fb 100644 (file)
@@ -78,7 +78,6 @@ Ip_u2s3u1(_lw);
 Ip_u1u2u3(_mfc0);
 Ip_u1u2u3(_mtc0);
 Ip_u2u1u3(_ori);
-Ip_u3u1u2(_or);
 Ip_u2s3u1(_pref);
 Ip_0(_rfe);
 Ip_u2s3u1(_sc);
index bf80921f2f56c0b145e698fa2c673ff49c7c0636..2fbfa1a8c3a9a96a4a2bc159a9eb8c986bc4297e 100644 (file)
@@ -247,8 +247,6 @@ void __init mips_pcibios_init(void)
        iomem_resource.end &= 0xfffffffffULL;                   /* 64 GB */
        ioport_resource.end = controller->io_resource->end;
 
-       controller->io_map_base = mips_io_port_base;
-
        register_pci_controller(controller);
 }
 
index 98e86ddb86ccd6e07766d53a04998d7f3a413cc4..eee4f3dfc410e248d5caf1e625b14dcbbb273ff3 100644 (file)
@@ -44,7 +44,6 @@ extern struct pci_ops pnx8550_pci_ops;
 
 static struct pci_controller pnx8550_controller = {
        .pci_ops        = &pnx8550_pci_ops,
-       .io_map_base    = PNX8550_PORT_BASE,
        .io_resource    = &pci_io_resource,
        .mem_resource   = &pci_mem_resource,
 };
index 64246c9c875c51d09e5c3861ca0e6f1096d50ac5..2aed50fef10ff8b800a58c8cdc0551d654b8fbf8 100644 (file)
@@ -113,7 +113,7 @@ void __init plat_mem_setup(void)
        PNX8550_GLB2_ENAB_INTA_O = 0;
 
        /* IO/MEM resources. */
-       set_io_port_base(PNX8550_PORT_BASE);
+       set_io_port_base(KSEG1);
        ioport_resource.start = 0;
        ioport_resource.end = ~0;
        iomem_resource.start = 0;
index 421e1a0d1e29ddc795c629290b94698dd7a2cc07..32548b5d68d6738ec7bf5709cf0a26f6055da3f4 100644 (file)
@@ -944,7 +944,6 @@ static struct pci_controller msp_pci_controller = {
        .pci_ops        = &msp_pci_ops,
        .mem_resource   = &pci_mem_resource,
        .mem_offset     = 0,
-       .io_map_base    = MSP_PCI_IOSPACE_BASE,
        .io_resource    = &pci_io_resource,
        .io_offset      = 0
 };
index cf5e1a25cb7d7fb942c3d0ebe53e9b3c6e5e14c1..0357946f30e6ec5d2719446eaf68a9b86e980b98 100644 (file)
@@ -54,7 +54,6 @@ static int __init pmc_yosemite_setup(void)
                panic(ioremap_failed);
 
        set_io_port_base(io_v_base);
-       py_controller.io_map_base = io_v_base;
        TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1);
 
        ioport_resource.end = TITAN_IO_SIZE - 1;
index 92da3155ce074ac16840c715fde220946a13b4fd..0444da1e23c24e2dc675a40034a2e7adc087ba41 100644 (file)
@@ -87,21 +87,6 @@ static int __init setup_bcm1250(void)
        return ret;
 }
 
-int sb1250_m3_workaround_needed(void)
-{
-       switch (soc_type) {
-       case K_SYS_SOC_TYPE_BCM1250:
-       case K_SYS_SOC_TYPE_BCM1250_ALT:
-       case K_SYS_SOC_TYPE_BCM1250_ALT2:
-       case K_SYS_SOC_TYPE_BCM1125:
-       case K_SYS_SOC_TYPE_BCM1125H:
-               return soc_pass < K_SYS_REVISION_BCM1250_C0;
-
-       default:
-               return 0;
-       }
-}
-
 static int __init setup_bcm112x(void)
 {
        int ret = 0;
index 6e2fe28dde4e7d68f260871c9377b96f30e8e644..e03cfa2e997e7299e1215a3a8c25a516d19d6751 100644 (file)
@@ -21,8 +21,6 @@
 #define L1_CACHE_DISPARITY     L1_CACHE_NENTRIES * L1_CACHE_BYTES
 #endif
 
-#define ARCH_KMALLOC_MINALIGN  L1_CACHE_BYTES
-
 /* data cache purge registers
  * - read from the register to unconditionally purge that cache line
  * - write address & 0xffffff00 to conditionally purge that cache line
index 7c77fa93ab33c29415e2876b92c137fecc3dd334..7f32611a7a5ebc1fe64e36716db7748df21c5070 100644 (file)
@@ -146,7 +146,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static __inline__ void __user *arch_compat_alloc_user_space(long len)
+static __inline__ void __user *compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = &current->thread.regs;
        return (void __user *)regs->gr[30];
index df971fa0c32f1fcbf411437a8e61ce07be5e57af..4c247e02d9b1b0e0655e47ebc15470801b4d9881 100644 (file)
@@ -1123,6 +1123,7 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096];
  */
 int pdc_iodc_print(const unsigned char *str, unsigned count)
 {
+       static int posx;        /* for simple TAB-Simulation... */
        unsigned int i;
        unsigned long flags;
 
@@ -1132,12 +1133,19 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
                        iodc_dbuf[i+0] = '\r';
                        iodc_dbuf[i+1] = '\n';
                        i += 2;
+                       posx = 0;
                        goto print;
+               case '\t':
+                       while (posx & 7) {
+                               iodc_dbuf[i] = ' ';
+                               i++, posx++;
+                       }
+                       break;
                case '\b':      /* BS */
-                       i--; /* overwrite last */
+                       posx -= 2;
                default:
                        iodc_dbuf[i] = str[i];
-                       i++;
+                       i++, posx++;
                        break;
                }
        }
index 27a7492ddb0d66f7762e94dea40ff3f3ac1fcd85..3ca1c61492182d3321c2fe7d2cf00d003ff5a8a5 100644 (file)
@@ -342,7 +342,6 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
                return SIGNALCODE(SIGFPE, FPE_FLTINV);
          case DIVISIONBYZEROEXCEPTION:
                update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
-               Clear_excp_register(exception_index);
                return SIGNALCODE(SIGFPE, FPE_FLTDIV);
          case INEXACTEXCEPTION:
                update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
index c107b74748908b3f3726d2eeaad470edda605111..1a54a3b3a3fa8da5397842d5cbbd2e370dc6d40b 100644 (file)
@@ -158,11 +158,9 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
 # Default to zImage, override when needed
 all: zImage
 
-# With make 3.82 we cannot mix normal and wildcard targets
-BOOT_TARGETS1 := zImage zImage.initrd uImage
-BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
+BOOT_TARGETS = zImage zImage.initrd uImage zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
 
-PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
+PHONY += $(BOOT_TARGETS)
 
 boot := arch/$(ARCH)/boot
 
@@ -177,16 +175,10 @@ relocs_check: arch/powerpc/relocs_check.pl vmlinux
 zImage: relocs_check
 endif
 
-$(BOOT_TARGETS1): vmlinux
-       $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
-$(BOOT_TARGETS2): vmlinux
-       $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
-
-
-bootwrapper_install:
+$(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
 
-%.dtb:
+bootwrapper_install %.dtb:
        $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
 
 define archhelp
index 8d0fff39cdbae152bb03771914f1628109a5197d..4774c2f92232b8ce98177d555b4d2c592dc076e5 100644 (file)
@@ -133,7 +133,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = current->thread.regs;
        unsigned long usp = regs->gpr[1];
index c1de3c9a1d65ecb6df4f6185bf9d5f93c4eb6b45..abbc2aaaced5bbc68d5be5699e9123a35cd99fdd 100644 (file)
@@ -135,5 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct irq_chip;
 
+#ifdef CONFIG_PERF_EVENTS
+
+#ifdef CONFIG_PPC64
+static inline unsigned long test_perf_event_pending(void)
+{
+       unsigned long x;
+
+       asm volatile("lbz %0,%1(13)"
+               : "=r" (x)
+               : "i" (offsetof(struct paca_struct, perf_event_pending)));
+       return x;
+}
+
+static inline void set_perf_event_pending(void)
+{
+       asm volatile("stb %0,%1(13)" : :
+               "r" (1),
+               "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+static inline void clear_perf_event_pending(void)
+{
+       asm volatile("stb %0,%1(13)" : :
+               "r" (0),
+               "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+#endif /* CONFIG_PPC64 */
+
+#else  /* CONFIG_PERF_EVENTS */
+
+static inline unsigned long test_perf_event_pending(void)
+{
+       return 0;
+}
+
+static inline void clear_perf_event_pending(void) {}
+#endif /* CONFIG_PERF_EVENTS */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
index fa6648aaf070524d1c2de8253b8ea35da16de3ff..2828f9d0f66ddb1d66be47662c999282fc25fdaf 100644 (file)
@@ -137,11 +137,6 @@ struct device_node * find_device_pe(struct device_node *dn);
 void eeh_sysfs_add_device(struct pci_dev *pdev);
 void eeh_sysfs_remove_device(struct pci_dev *pdev);
 
-static inline const char *eeh_pci_name(struct pci_dev *pdev)
-{
-       return pdev ? pci_name(pdev) : "<null>";
-}
-
 #endif /* CONFIG_EEH */
 
 #else /* CONFIG_PCI */
index 692c056566c7d9702cfa24ce026e6f4e612128c1..0812b0f414bbe486f38db00ab1a171c4810410ef 100644 (file)
@@ -133,6 +133,7 @@ int main(void)
        DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
        DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
        DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
+       DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
        DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_PPC_MM_SLICES
        DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
index 917cebc2f262b2cd2b3d1f125759611cefd5d7ae..9763267e38b46cbcdb2d6c9bfffd83d61b1eeb08 100644 (file)
@@ -556,6 +556,15 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
        TRACE_AND_RESTORE_IRQ(r5);
 
+#ifdef CONFIG_PERF_EVENTS
+       /* check paca->perf_event_pending if we're enabling ints */
+       lbz     r3,PACAPERFPEND(r13)
+       and.    r3,r3,r5
+       beq     27f
+       bl      .perf_event_do_pending
+27:
+#endif /* CONFIG_PERF_EVENTS */
+
        /* extract EE bit and use it to restore paca->hard_enabled */
        ld      r3,_MSR(r1)
        rldicl  r4,r3,49,63             /* r0 = (r3 >> 15) & 1 */
index 0a3cf9eb4ca490459f2855c6d85a7031cd6120d9..c38afdb45d7b066d2720d69a484f4c5a38ed71c0 100644 (file)
@@ -563,21 +563,15 @@ __secondary_start:
        /* Set thread priority to MEDIUM */
        HMT_MEDIUM
 
-       /* Initialize the kernel stack.  Just a repeat for iSeries.      */
-       LOAD_REG_ADDR(r3, current_set)
-       sldi    r28,r24,3               /* get current_set[cpu#]         */
-       ldx     r14,r3,r28
-       addi    r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
-       std     r14,PACAKSAVE(r13)
-
        /* Do early setup for that CPU (stab, slb, hash table pointer) */
        bl      .early_setup_secondary
 
-       /*
-        * setup the new stack pointer, but *don't* use this until
-        * translation is on.
-        */
-       mr      r1, r14
+       /* Initialize the kernel stack.  Just a repeat for iSeries.      */
+       LOAD_REG_ADDR(r3, current_set)
+       sldi    r28,r24,3               /* get current_set[cpu#]         */
+       ldx     r1,r3,r28
+       addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+       std     r1,PACAKSAVE(r13)
 
        /* Clear backchain so we get nice backtraces */
        li      r7,0
index 8564a412e7a66f28b0403007f7a6615d23167816..e5d1211779840f029841e1591952c3b5ec9b4fae 100644 (file)
@@ -53,6 +53,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -137,6 +138,11 @@ notrace void raw_local_irq_restore(unsigned long en)
        }
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
+       if (test_perf_event_pending()) {
+               clear_perf_event_pending();
+               perf_event_do_pending();
+       }
+
        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
index ec9b95f635bbe52137c9bcd4dd45c65ca3894b2a..479574413a93fa5c7aa3b38e28537b2830517f90 100644 (file)
@@ -173,11 +173,9 @@ static int p970_marked_instr_event(u64 event)
        switch (unit) {
        case PM_VPU:
                mask = 0x4c;            /* byte 0 bits 2,3,6 */
-               break;
        case PM_LSU0:
                /* byte 2 bits 0,2,3,4,6; all of byte 1 */
                mask = 0x085dff00;
-               break;
        case PM_LSU1L:
                mask = 0x50 << 24;      /* byte 3 bits 4,6 */
                break;
index 7143d4ce1cd875e2a8ca38fc5fd1ba0286ddaaf0..a136a11c490d0f36a9b32812132e682c38262f8f 100644 (file)
@@ -530,60 +530,25 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_PERF_EVENTS
-
-/*
- * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
- */
-#ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
-{
-       unsigned long x;
-
-       asm volatile("lbz %0,%1(13)"
-               : "=r" (x)
-               : "i" (offsetof(struct paca_struct, perf_event_pending)));
-       return x;
-}
-
-static inline void set_perf_event_pending_flag(void)
-{
-       asm volatile("stb %0,%1(13)" : :
-               "r" (1),
-               "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-
-static inline void clear_perf_event_pending(void)
-{
-       asm volatile("stb %0,%1(13)" : :
-               "r" (0),
-               "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-
-#else /* 32-bit */
-
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
 DEFINE_PER_CPU(u8, perf_event_pending);
 
-#define set_perf_event_pending_flag()  __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()      __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()     __get_cpu_var(perf_event_pending) = 0
-
-#endif /* 32 vs 64 bit */
-
 void set_perf_event_pending(void)
 {
-       preempt_disable();
-       set_perf_event_pending_flag();
+       get_cpu_var(perf_event_pending) = 1;
        set_dec(1);
-       preempt_enable();
+       put_cpu_var(perf_event_pending);
 }
 
-#else  /* CONFIG_PERF_EVENTS */
+#define test_perf_event_pending()      __get_cpu_var(perf_event_pending)
+#define clear_perf_event_pending()     __get_cpu_var(perf_event_pending) = 0
+
+#else  /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
 
 #define test_perf_event_pending()      0
 #define clear_perf_event_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -611,6 +576,10 @@ void timer_interrupt(struct pt_regs * regs)
        set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
+       if (test_perf_event_pending()) {
+               clear_perf_event_pending();
+               perf_event_do_pending();
+       }
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
 #endif
@@ -628,11 +597,6 @@ void timer_interrupt(struct pt_regs * regs)
 
        calculate_steal_time();
 
-       if (test_perf_event_pending()) {
-               clear_perf_event_pending();
-               perf_event_do_pending();
-       }
-
 #ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
@@ -864,8 +828,7 @@ static cycle_t timebase_read(struct clocksource *cs)
        return (cycle_t)get_tb();
 }
 
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
-                    u32 mult)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
 {
        u64 t2x, stamp_xsec;
 
@@ -878,7 +841,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 
        /* XXX this assumes clock->shift == 22 */
        /* 4611686018 ~= 2^(20+64-22) / 1e9 */
-       t2x = (u64) mult * 4611686018ULL;
+       t2x = (u64) clock->mult * 4611686018ULL;
        stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
        do_div(stamp_xsec, 1000000000);
        stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
index ff184f4771e93274953dab31d875f2b381318248..2a4551f78f60c242d3477b6742efaf8aa322aaab 100644 (file)
@@ -176,8 +176,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
-       if (!IS_ERR(vcpu))
-               kvmppc_create_vcpu_debugfs(vcpu, id);
+       kvmppc_create_vcpu_debugfs(vcpu, id);
        return vcpu;
 }
 
index 3ac0cd3a53738e28cd7a0cb0649d94b27c5d2d3b..64e2e499e32a6fecacf81cb6eba2d03a32c8e50d 100644 (file)
@@ -71,7 +71,7 @@ _GLOBAL(strcmp)
 
 _GLOBAL(strncmp)
        PPC_LCMPI r5,0
-       ble-    2f
+       beqlr
        mtctr   r5
        addi    r5,r3,-1
        addi    r4,r4,-1
@@ -82,8 +82,6 @@ _GLOBAL(strncmp)
        beqlr   1
        bdnzt   eq,1b
        blr
-2:     li      r3,0
-       blr
 
 _GLOBAL(strlen)
        addi    r4,r3,-1
index 45f4e61b263287992f4a83b72a875d9a5b74e9c2..dc93e95b256eae04e248900e82858d94c8fe822a 100644 (file)
@@ -131,10 +131,15 @@ void settlbcam(int index, unsigned long virt, phys_addr_t phys,
        TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
        TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
 
+#ifndef CONFIG_KGDB /* want user access for breakpoints */
        if (flags & _PAGE_USER) {
           TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
           TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
        }
+#else
+       TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
+       TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+#endif
 
        tlbcam_addrs[index].start = virt;
        tlbcam_addrs[index].limit = virt + size - 1;
index c8fc4dc8f572b6aa4490aecaf0d606c6032259dd..ae06c6236d9c396402040be44d25257283141abd 100644 (file)
@@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n)
                index = ENTRIES-1;
 
        /* make sure index is valid */
-       if ((index >= ENTRIES) || (index < 0))
+       if ((index > ENTRIES) || (index < 0))
                index = ENTRIES-1;
 
        return initial_lfsr[index];
index 3304f32fc7b897c5ce2183bd02f426fdaff522ab..ccd8dd03b8c987e701e476fe43a8f84348901d13 100644 (file)
@@ -491,7 +491,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
            pdn->eeh_mode & EEH_MODE_NOCHECK) {
                ignored_check++;
                pr_debug("EEH: Ignored check (%x) for %s %s\n",
-                        pdn->eeh_mode, eeh_pci_name(dev), dn->full_name);
+                        pdn->eeh_mode, pci_name (dev), dn->full_name);
                return 0;
        }
 
@@ -515,7 +515,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
                        printk (KERN_ERR "EEH: %d reads ignored for recovering device at "
                                "location=%s driver=%s pci addr=%s\n",
                                pdn->eeh_check_count, location,
-                               dev->driver->name, eeh_pci_name(dev));
+                               dev->driver->name, pci_name(dev));
                        printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n",
                                dev->driver->name);
                        dump_stack();
index 52c4b4038cd7c6ee685359152694e13e9ec41a2e..0e8db6771252a2f3dcb4c28891e0f6d2958389a0 100644 (file)
@@ -353,7 +353,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
                location = location ? location : "unknown";
                printk(KERN_ERR "EEH: Error: Cannot find partition endpoint "
                                "for location=%s pci addr=%s\n",
-                       location, eeh_pci_name(event->dev));
+                       location, pci_name(event->dev));
                return NULL;
        }
 
@@ -384,7 +384,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
                pci_str = pci_name (frozen_pdn->pcidev);
                drv_str = pcid_name (frozen_pdn->pcidev);
        } else {
-               pci_str = eeh_pci_name(event->dev);
+               pci_str = pci_name (event->dev);
                drv_str = pcid_name (event->dev);
        }
        
index ec5df8f519c7417327923bcadafdbda76874ec04..ddb80f5d850b77231783b7f05c1d7adadd6f9aed 100644 (file)
@@ -80,7 +80,7 @@ static int eeh_event_handler(void * dummy)
        eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
 
        printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
-              eeh_pci_name(event->dev));
+              pci_name(event->dev));
 
        pdn = handle_eeh_events(event);
 
index c2c172042db26329e61625dfa37baca80e131a34..ebff6d9a4e395ae0088da5a26e99459cedc2f0f5 100644 (file)
@@ -66,6 +66,30 @@ static void pseries_mach_cpu_die(void)
        for(;;);
 }
 
+static int qcss_tok;   /* query-cpu-stopped-state token */
+
+/* Get state of physical CPU.
+ * Return codes:
+ *     0       - The processor is in the RTAS stopped state
+ *     1       - stop-self is in progress
+ *     2       - The processor is not in the RTAS stopped state
+ *     -1      - Hardware Error
+ *     -2      - Hardware Busy, Try again later.
+ */
+static int query_cpu_stopped(unsigned int pcpu)
+{
+       int cpu_status, status;
+
+       status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+       if (status != 0) {
+               printk(KERN_ERR
+                      "RTAS query-cpu-stopped-state failed: %i\n", status);
+               return status;
+       }
+
+       return cpu_status;
+}
+
 static int pseries_cpu_disable(void)
 {
        int cpu = smp_processor_id();
@@ -89,9 +113,8 @@ static void pseries_cpu_die(unsigned int cpu)
        unsigned int pcpu = get_hard_smp_processor_id(cpu);
 
        for (tries = 0; tries < 25; tries++) {
-               cpu_status = smp_query_cpu_stopped(pcpu);
-               if (cpu_status == QCSS_STOPPED ||
-                   cpu_status == QCSS_HARDWARE_ERROR)
+               cpu_status = query_cpu_stopped(pcpu);
+               if (cpu_status == 0 || cpu_status == -1)
                        break;
                cpu_relax();
        }
@@ -233,7 +256,6 @@ static int __init pseries_cpu_hotplug_init(void)
 {
        struct device_node *np;
        const char *typep;
-       int qcss_tok;
 
        for_each_node_by_name(np, "interrupt-controller") {
                typep = of_get_property(np, "compatible", NULL);
index 45f634c022081425d72ac024ae689efe91646858..a24a6b2333b2388521989ac5ac2b38365a61e6ab 100644 (file)
@@ -4,14 +4,6 @@
 #include <asm/hvcall.h>
 #include <asm/page.h>
 
-/* Get state of physical CPU from query_cpu_stopped */
-int smp_query_cpu_stopped(unsigned int pcpu);
-#define QCSS_STOPPED 0
-#define QCSS_STOPPING 1
-#define QCSS_NOT_STOPPED 2
-#define QCSS_HARDWARE_ERROR -1
-#define QCSS_HARDWARE_BUSY -2
-
 static inline long poll_pending(void)
 {
        return plpar_hcall_norets(H_POLL_PENDING);
index 3afa079d178b4805c9a43137bb3b13b05a3a5bbd..440000cc71307ac83df8ac251af13085c910aec6 100644 (file)
  */
 static cpumask_t of_spin_map;
 
-/* Query where a cpu is now.  Return codes #defined in plpar_wrappers.h */
-int smp_query_cpu_stopped(unsigned int pcpu)
-{
-       int cpu_status, status;
-       int qcss_tok = rtas_token("query-cpu-stopped-state");
-
-       if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
-               printk(KERN_INFO "Firmware doesn't support "
-                               "query-cpu-stopped-state\n");
-               return QCSS_HARDWARE_ERROR;
-       }
-
-       status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
-       if (status != 0) {
-               printk(KERN_ERR
-                      "RTAS query-cpu-stopped-state failed: %i\n", status);
-               return status;
-       }
-
-       return cpu_status;
-}
-
 /**
  * smp_startup_cpu() - start the given cpu
  *
@@ -103,12 +81,6 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
 
        pcpu = get_hard_smp_processor_id(lcpu);
 
-       /* Check to see if the CPU out of FW already for kexec */
-       if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){
-               cpu_set(lcpu, of_spin_map);
-               return 1;
-       }
-
        /* Fixup atomic count: it exited inside IRQ handler. */
        task_thread_info(paca[lcpu].__current)->preempt_count   = 0;
 
index 0c940d38d6b7dc60e6a816991d3e4b8acc642774..01a08020bc0e2bb3757e783ca51ee35478bbbc52 100644 (file)
@@ -180,7 +180,7 @@ static inline int is_compat_task(void)
 
 #endif
 
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
 {
        unsigned long stack;
 
index 258ba88b7b5039edb8ed6b849774966d1e717bda..f23961ada7fb2ce8c99e3b240627d8e1016e1ba2 100644 (file)
@@ -183,7 +183,6 @@ struct s390_idle_data {
        unsigned long long idle_count;
        unsigned long long idle_enter;
        unsigned long long idle_time;
-       int nohz_delay;
 };
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
@@ -199,11 +198,4 @@ static inline void s390_idle_check(void)
                vtime_start_cpu();
 }
 
-static inline int s390_nohz_delay(int cpu)
-{
-       return per_cpu(s390_idle, cpu).nohz_delay != 0;
-}
-
-#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
-
 #endif /* _S390_CPUTIME_H */
index 24fd61132e35eea95497e49410b34cbd1f0831c7..015e27da40eb100e5375d356c1160d4c3a98c552 100644 (file)
@@ -95,6 +95,7 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
 static int notrace s390_revalidate_registers(struct mci *mci)
 {
        int kill_task;
+       u64 tmpclock;
        u64 zero;
        void *fpt_save_area, *fpt_creg_save_area;
 
@@ -213,10 +214,11 @@ static int notrace s390_revalidate_registers(struct mci *mci)
                        : "0", "cc");
 #endif
        /* Revalidate clock comparator register */
-       if (S390_lowcore.clock_comparator == -1)
-               set_clock_comparator(get_clock());
-       else
-               set_clock_comparator(S390_lowcore.clock_comparator);
+       asm volatile(
+               "       stck    0(%1)\n"
+               "       sckc    0(%1)"
+               : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
+
        /* Check if old PSW is valid */
        if (!mci->wp)
                /*
index 08f883842e21bac1d673e82fe7560602501e0c16..653c6a1787404480f532d7f033223540e2fcc616 100644 (file)
@@ -632,7 +632,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 {
-       long ret = 0;
+       long ret;
 
        /* Do the secure computing check first. */
        secure_computing(regs->gprs[2]);
@@ -641,6 +641,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
         * The sysc_tracesys code in entry.S stored the system
         * call number to gprs[2].
         */
+       ret = regs->gprs[2];
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            (tracehook_report_syscall_entry(regs) ||
             regs->gprs[2] >= NR_syscalls)) {
@@ -662,7 +663,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
                                    regs->gprs[2], regs->orig_gpr2,
                                    regs->gprs[3], regs->gprs[4],
                                    regs->gprs[5]);
-       return ret ?: regs->gprs[2];
+       return ret;
 }
 
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
index 59618bcd99b7a64bc8b9077b688d907ac73fc0d8..0de305b598cee30a1352f67d4ded1eda4f0d3be0 100644 (file)
@@ -126,8 +126,6 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
                /* Serve timer interrupts first. */
                clock_comparator_work();
        kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
-       if (code != 0x1004)
-               __get_cpu_var(s390_idle).nohz_delay = 1;
         index = ext_hash(code);
        for (p = ext_int_hash[index]; p; p = p->next) {
                if (likely(p->code == code))
index 68e1ecf5ebabf657a6c7e053577752dc69d6046b..34162a0b2caa6483f397306cc3e21a457b83b051 100644 (file)
@@ -214,8 +214,7 @@ struct clocksource * __init clocksource_default_clock(void)
        return &clocksource_tod;
 }
 
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
-                    u32 mult)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
 {
        if (clock != &clocksource_tod)
                return;
index b59a812a010e2b1bd0861e30ae3d662c45d3e04a..c41bb0d416e1a25a56f50dde6d40f13a17cab125 100644 (file)
@@ -167,8 +167,6 @@ void vtime_stop_cpu(void)
        /* Wait for external, I/O or machine check interrupt. */
        psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;
 
-       idle->nohz_delay = 0;
-
        /* Check if the CPU timer needs to be reprogrammed. */
        if (vq->do_spt) {
                __u64 vmax = VTIMER_MAX_SLICE;
index 75fbf199d71ec79cc2d9b5e61a66232b305bd3d9..ca2d31277b3b46eee836d1c34403521377732692 100644 (file)
@@ -338,13 +338,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
-               goto out_free_sie_block;
+               goto out_free_cpu;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
 
        return vcpu;
-out_free_sie_block:
-       free_page((unsigned long)(vcpu->arch.sie_block));
 out_free_cpu:
        kfree(vcpu);
 out_nomem:
index 7c37ec359ec29ad40fc2cf47642316ac3cbba8bc..752b362bf651d7e1bdf84a3f54601ddf863e8b12 100644 (file)
@@ -29,21 +29,17 @@ static void __udelay_disabled(unsigned long long usecs)
 {
        unsigned long mask, cr0, cr0_saved;
        u64 clock_saved;
-       u64 end;
 
-       mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
-       end = get_clock() + (usecs << 12);
        clock_saved = local_tick_disable();
+       set_clock_comparator(get_clock() + (usecs << 12));
        __ctl_store(cr0_saved, 0, 0);
        cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
        __ctl_load(cr0 , 0, 0);
+       mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
        lockdep_off();
-       do {
-               set_clock_comparator(end);
-               trace_hardirqs_on();
-               __load_psw_mask(mask);
-               local_irq_disable();
-       } while (get_clock() < end);
+       trace_hardirqs_on();
+       __load_psw_mask(mask);
+       local_irq_disable();
        lockdep_on();
        __ctl_load(cr0_saved, 0, 0);
        local_tick_enable(clock_saved);
index 974ba71df4a86095cd1985e6b5fdeed1fd4fc673..fd56a71ca9d9bbf47c3080d2473f2d4ad2ac0478 100644 (file)
@@ -132,7 +132,7 @@ void decompress_kernel(void)
        output_addr = (CONFIG_MEMORY_START + 0x2000);
 #else
        output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
-#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
+#ifdef CONFIG_29BIT
        output_addr |= P2SEG;
 #endif
 #endif
index bf6939cc4744836a5a42671810bdff2681919bbf..ccb1d93bb04361678b967fd80151c43427263b27 100644 (file)
@@ -212,9 +212,7 @@ extern void __kernel_vsyscall;
 
 #define VSYSCALL_AUX_ENT                                       \
        if (vdso_enabled)                                       \
-               NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);        \
-       else                                                    \
-               NEW_AUX_ENT(AT_IGNORE, 0);
+               NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
 #else
 #define VSYSCALL_AUX_ENT
 #endif /* CONFIG_VSYSCALL */
@@ -222,7 +220,7 @@ extern void __kernel_vsyscall;
 #ifdef CONFIG_SH_FPU
 #define FPU_AUX_ENT    NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
 #else
-#define FPU_AUX_ENT    NEW_AUX_ENT(AT_IGNORE, 0)
+#define FPU_AUX_ENT
 #endif
 
 extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
index 71a9c3c47e6fb732fb3e9fc3cd489a4cb163b4d1..160db1003cfb121dde1b41b1463de400805495eb 100644 (file)
@@ -69,7 +69,6 @@ asmlinkage void __cpuinit start_secondary(void)
        unsigned int cpu;
        struct mm_struct *mm = &init_mm;
 
-       enable_mmu();
        atomic_inc(&mm->mm_count);
        atomic_inc(&mm->mm_users);
        current->active_mm = mm;
index f5cc06f44c174f973203ddad92d21d2d5773d878..f2e48009989e1e57f552066d8ae9890264590d5c 100644 (file)
 #define atomic64_set(v, i)     (((v)->counter) = i)
 
 extern void atomic_add(int, atomic_t *);
-extern void atomic64_add(long, atomic64_t *);
+extern void atomic64_add(int, atomic64_t *);
 extern void atomic_sub(int, atomic_t *);
-extern void atomic64_sub(long, atomic64_t *);
+extern void atomic64_sub(int, atomic64_t *);
 
 extern int atomic_add_ret(int, atomic_t *);
-extern long atomic64_add_ret(long, atomic64_t *);
+extern int atomic64_add_ret(int, atomic64_t *);
 extern int atomic_sub_ret(int, atomic_t *);
-extern long atomic64_sub_ret(long, atomic64_t *);
+extern int atomic64_sub_ret(int, atomic64_t *);
 
 #define atomic_dec_return(v) atomic_sub_ret(1, v)
 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
@@ -91,7 +91,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
        long c, old;
        c = atomic64_read(v);
index 612bb3862c6d4b2da47cf85de7afc66546dc2ac0..0e706257918f35397b10657be3b95538ef23e45b 100644 (file)
@@ -166,7 +166,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = current_thread_info()->kregs;
        unsigned long usp = regs->u_regs[UREG_I6];
index 2889574608db72f36b3ae1ea61309f77e22e282f..93fe21e02c86b0c7607f06c813532223220a68cf 100644 (file)
@@ -8,7 +8,7 @@
 #include <asm/page.h>      /* IO address mapping routines need this */
 #include <asm/system.h>
 
-#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page)     (((page) - mem_map) << PAGE_SHIFT)
 
 static inline u32 flip_dword (u32 l)
 {
@@ -249,14 +249,10 @@ extern void iounmap(volatile void __iomem *addr);
 
 #define ioread8(X)                     readb(X)
 #define ioread16(X)                    readw(X)
-#define ioread16be(X)                  __raw_readw(X)
 #define ioread32(X)                    readl(X)
-#define ioread32be(X)                  __raw_readl(X)
 #define iowrite8(val,X)                        writeb(val,X)
 #define iowrite16(val,X)               writew(val,X)
-#define iowrite16be(val,X)             __raw_writew(val,X)
 #define iowrite32(val,X)               writel(val,X)
-#define iowrite32be(val,X)             __raw_writel(val,X)
 
 static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
 {
index 9517d063c79c604d99d21967981f07b92cb1c70c..4aee21dc9c6f10d8f6d0f908177f7c56f5095c72 100644 (file)
@@ -468,14 +468,10 @@ static inline void iounmap(volatile void __iomem *addr)
 
 #define ioread8(X)                     readb(X)
 #define ioread16(X)                    readw(X)
-#define ioread16be(X)                  __raw_readw(X)
 #define ioread32(X)                    readl(X)
-#define ioread32be(X)                  __raw_readl(X)
 #define iowrite8(val,X)                        writeb(val,X)
 #define iowrite16(val,X)               writew(val,X)
-#define iowrite16be(val,X)             __raw_writew(val,X)
 #define iowrite32(val,X)               writel(val,X)
-#define iowrite32be(val,X)             __raw_writel(val,X)
 
 /* Create a virtual mapping cookie for an IO port range */
 extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
index 3e0b2d62303df55133d1f9547e8c984ba5dbbeb2..a5db0317b5fbfecc523e82b7a27172252cb1f4a2 100644 (file)
@@ -185,8 +185,9 @@ extern int prom_getunumber(int syndrome_code,
                           char *buf, int buflen);
 
 /* Retain physical memory to the caller across soft resets. */
-extern int prom_retain(const char *name, unsigned long size,
-                      unsigned long align, unsigned long *paddr);
+extern unsigned long prom_retain(const char *name,
+                                unsigned long pa_low, unsigned long pa_high,
+                                long size, long align);
 
 /* Load explicit I/D TLB entries into the calling processor. */
 extern long prom_itlb_load(unsigned long index,
@@ -286,6 +287,26 @@ extern void prom_sun4v_guest_soft_state(void);
 extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
 
 /* Client interface level routines. */
-extern void p1275_cmd_direct(unsigned long *);
+extern long p1275_cmd(const char *, long, ...);
+
+#if 0
+#define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x))
+#else
+#define P1275_SIZE(x) x
+#endif
+
+/* We support at most 16 input and 1 output argument */
+#define P1275_ARG_NUMBER               0
+#define P1275_ARG_IN_STRING            1
+#define P1275_ARG_OUT_BUF              2
+#define P1275_ARG_OUT_32B              3
+#define P1275_ARG_IN_FUNCTION          4
+#define P1275_ARG_IN_BUF               5
+#define P1275_ARG_IN_64B               6
+
+#define P1275_IN(x) ((x) & 0xf)
+#define P1275_OUT(x) (((x) << 4) & 0xf0)
+#define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o))
+#define P1275_ARG(n,x) ((x) << ((n)*3 + 8))
 
 #endif /* !(__SPARC64_OPLIB_H) */
index 156707b0f18d1ba4a5fbd01fc972fbec8c529c0d..f72080bdda947ec81edce5206842d836755f4521 100644 (file)
@@ -143,7 +143,7 @@ extern unsigned long pfn_base;
 #define phys_to_virt           __va
 
 #define ARCH_PFN_OFFSET                (pfn_base)
-#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)    (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
 
 #define pfn_valid(pfn)         (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
 #define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
index 43cf002d480b09743fd0f056dd5ee9cec4abb408..ff9ead640c4aa090f33de6f9576766b6d639e6d8 100644 (file)
@@ -228,10 +228,6 @@ static const struct of_device_id ecpp_match[] = {
                .name = "parallel",
                .compatible = "ns87317-ecpp",
        },
-       {
-               .name = "parallel",
-               .compatible = "pnpALI,1533,3",
-       },
        {},
 };
 
index e4c61a18bb2843855c493bb86c45d7787e862a38..a303c9d64d845989e313a326f4c5e92f283148a5 100644 (file)
@@ -5,7 +5,7 @@
 #define RWSEM_UNLOCKED_VALUE           0x00000000
 #define RWSEM_ACTIVE_BIAS              0x00000001
 #define RWSEM_ACTIVE_MASK              0x0000ffff
-#define RWSEM_WAITING_BIAS             (-0x00010000)
+#define RWSEM_WAITING_BIAS             0xffff0000
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
index a232e9e1f4e515d19e8f089872a797c746241777..55db5eca08e25da163b4c2156c51783b5b036f74 100644 (file)
@@ -53,8 +53,8 @@ struct stat {
        ino_t           st_ino;
        mode_t          st_mode;
        short           st_nlink;
-       unsigned short  st_uid;
-       unsigned short  st_gid;
+       uid_t           st_uid;
+       gid_t           st_gid;
        unsigned short  st_rdev;
        off_t           st_size;
        time_t          st_atime;
index 4589ca33220ff6463dbd7191259d01debb5fb13f..f3b5466c389cc5fe1a4e44decf12b3e1a65bae9c 100644 (file)
@@ -99,7 +99,7 @@ static int __devinit clock_board_probe(struct of_device *op,
 
        p->leds_resource.start = (unsigned long)
                (p->clock_regs + CLOCK_CTRL);
-       p->leds_resource.end = p->leds_resource.start;
+       p->leds_resource.end = p->leds_resource.end;
        p->leds_resource.name = "leds";
 
        p->leds_pdev.name = "sunfire-clockboard-leds";
@@ -194,7 +194,7 @@ static int __devinit fhc_probe(struct of_device *op,
        if (!p->central) {
                p->leds_resource.start = (unsigned long)
                        (p->pregs + FHC_PREGS_CTRL);
-               p->leds_resource.end = p->leds_resource.start;
+               p->leds_resource.end = p->leds_resource.end;
                p->leds_resource.name = "leds";
 
                p->leds_pdev.name = "sunfire-fhc-leds";
index c49865b30719d2e8113313591834f34d4d5fa063..2830b415e2147ecfda36acc99d620408472128bd 100644 (file)
@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                         * Set some valid stack frames to give to the child.
                         */
                        childstack = (struct sparc_stackf __user *)
-                               (sp & ~0xfUL);
+                               (sp & ~0x7UL);
                        parentstack = (struct sparc_stackf __user *)
                                regs->u_regs[UREG_FP];
 
index cb70476bd8f5ccd72a6f72900195f8a74f6abfcb..c3f1cce0e95e78ec40d466fb2a82f747e1590c48 100644 (file)
@@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
        } else
                __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
 
-       /* Now align the stack as this is mandatory in the Sparc ABI
-        * due to how register windows work.  This hides the
-        * restriction from thread libraries etc.
+       /* Now 8-byte align the stack as this is mandatory in the
+        * Sparc ABI due to how register windows work.  This hides
+        * the restriction from thread libraries etc.  -DaveM
         */
-       csp &= ~15UL;
+       csp &= ~7UL;
 
        distance = fp - psp;
        rval = (csp - distance);
index 75fad425e249bc40559f98d14ead5699839bbbb8..ba5b09ad6666397a953af76945205961868edb29 100644 (file)
@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
 };
 
 /* Align macros */
-#define SF_ALIGNEDSZ  (((sizeof(struct signal_frame32) + 15) & (~15)))
-#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
+#define SF_ALIGNEDSZ  (((sizeof(struct signal_frame32) + 7) & (~7)))
+#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
 
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
@@ -420,17 +420,15 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
                        sp = current->sas_ss_sp + current->sas_ss_size;
        }
 
-       sp -= framesize;
-
        /* Always align the stack frame.  This handles two cases.  First,
         * sigaltstack need not be mindful of platform specific stack
         * alignment.  Second, if we took this signal because the stack
         * is not aligned properly, we'd like to take the signal cleanly
         * and report that.
         */
-       sp &= ~15UL;
+       sp &= ~7UL;
 
-       return (void __user *) sp;
+       return (void __user *)(sp - framesize);
 }
 
 static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
@@ -453,66 +451,8 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        return err;
 }
 
-/* The I-cache flush instruction only works in the primary ASI, which
- * right now is the nucleus, aka. kernel space.
- *
- * Therefore we have to kick the instructions out using the kernel
- * side linear mapping of the physical address backing the user
- * instructions.
- */
-static void flush_signal_insns(unsigned long address)
-{
-       unsigned long pstate, paddr;
-       pte_t *ptep, pte;
-       pgd_t *pgdp;
-       pud_t *pudp;
-       pmd_t *pmdp;
-
-       /* Commit all stores of the instructions we are about to flush.  */
-       wmb();
-
-       /* Disable cross-call reception.  In this way even a very wide
-        * munmap() on another cpu can't tear down the page table
-        * hierarchy from underneath us, since that can't complete
-        * until the IPI tlb flush returns.
-        */
-
-       __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-       __asm__ __volatile__("wrpr %0, %1, %%pstate"
-                               : : "r" (pstate), "i" (PSTATE_IE));
-
-       pgdp = pgd_offset(current->mm, address);
-       if (pgd_none(*pgdp))
-               goto out_irqs_on;
-       pudp = pud_offset(pgdp, address);
-       if (pud_none(*pudp))
-               goto out_irqs_on;
-       pmdp = pmd_offset(pudp, address);
-       if (pmd_none(*pmdp))
-               goto out_irqs_on;
-
-       ptep = pte_offset_map(pmdp, address);
-       pte = *ptep;
-       if (!pte_present(pte))
-               goto out_unmap;
-
-       paddr = (unsigned long) page_address(pte_page(pte));
-
-       __asm__ __volatile__("flush     %0 + %1"
-                            : /* no outputs */
-                            : "r" (paddr),
-                              "r" (address & (PAGE_SIZE - 1))
-                            : "memory");
-
-out_unmap:
-       pte_unmap(ptep);
-out_irqs_on:
-       __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
-
-}
-
-static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
-                        int signo, sigset_t *oldset)
+static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+                         int signo, sigset_t *oldset)
 {
        struct signal_frame32 __user *sf;
        int sigframe_size;
@@ -605,7 +545,13 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
        if (ka->ka_restorer) {
                regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
        } else {
+               /* Flush instruction space. */
                unsigned long address = ((unsigned long)&(sf->insns[0]));
+               pgd_t *pgdp = pgd_offset(current->mm, address);
+               pud_t *pudp = pud_offset(pgdp, address);
+               pmd_t *pmdp = pmd_offset(pudp, address);
+               pte_t *ptep;
+               pte_t pte;
 
                regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
        
@@ -614,22 +560,34 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
                if (err)
                        goto sigsegv;
 
-               flush_signal_insns(address);
+               preempt_disable();
+               ptep = pte_offset_map(pmdp, address);
+               pte = *ptep;
+               if (pte_present(pte)) {
+                       unsigned long page = (unsigned long)
+                               page_address(pte_page(pte));
+
+                       wmb();
+                       __asm__ __volatile__("flush     %0 + %1"
+                                            : /* no outputs */
+                                            : "r" (page),
+                                              "r" (address & (PAGE_SIZE - 1))
+                                            : "memory");
+               }
+               pte_unmap(ptep);
+               preempt_enable();
        }
-       return 0;
+       return;
 
 sigill:
        do_exit(SIGILL);
-       return -EINVAL;
-
 sigsegv:
        force_sigsegv(signo, current);
-       return -EFAULT;
 }
 
-static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
-                           unsigned long signr, sigset_t *oldset,
-                           siginfo_t *info)
+static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+                            unsigned long signr, sigset_t *oldset,
+                            siginfo_t *info)
 {
        struct rt_signal_frame32 __user *sf;
        int sigframe_size;
@@ -727,7 +685,12 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
        if (ka->ka_restorer)
                regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
        else {
+               /* Flush instruction space. */
                unsigned long address = ((unsigned long)&(sf->insns[0]));
+               pgd_t *pgdp = pgd_offset(current->mm, address);
+               pud_t *pudp = pud_offset(pgdp, address);
+               pmd_t *pmdp = pmd_offset(pudp, address);
+               pte_t *ptep;
 
                regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
        
@@ -739,32 +702,38 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
                if (err)
                        goto sigsegv;
 
-               flush_signal_insns(address);
+               preempt_disable();
+               ptep = pte_offset_map(pmdp, address);
+               if (pte_present(*ptep)) {
+                       unsigned long page = (unsigned long)
+                               page_address(pte_page(*ptep));
+
+                       wmb();
+                       __asm__ __volatile__("flush     %0 + %1"
+                                            : /* no outputs */
+                                            : "r" (page),
+                                              "r" (address & (PAGE_SIZE - 1))
+                                            : "memory");
+               }
+               pte_unmap(ptep);
+               preempt_enable();
        }
-       return 0;
+       return;
 
 sigill:
        do_exit(SIGILL);
-       return -EINVAL;
-
 sigsegv:
        force_sigsegv(signr, current);
-       return -EFAULT;
 }
 
-static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
-                                 siginfo_t *info,
-                                 sigset_t *oldset, struct pt_regs *regs)
+static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
+                                  siginfo_t *info,
+                                  sigset_t *oldset, struct pt_regs *regs)
 {
-       int err;
-
        if (ka->sa.sa_flags & SA_SIGINFO)
-               err = setup_rt_frame32(ka, regs, signr, oldset, info);
+               setup_rt_frame32(ka, regs, signr, oldset, info);
        else
-               err = setup_frame32(ka, regs, signr, oldset);
-
-       if (err)
-               return err;
+               setup_frame32(ka, regs, signr, oldset);
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -772,10 +741,6 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
                sigaddset(&current->blocked,signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
-
-       tracehook_signal_handler(signr, info, ka, regs, 0);
-
-       return 0;
 }
 
 static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -822,14 +787,16 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart32(orig_i0, regs, &ka.sa);
-               if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
-                       /* A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               }
+               handle_signal32(signr, &ka, &info, oldset, regs);
+
+               /* A signal was successfully delivered; the saved
+                * sigmask will have been stored in the signal frame,
+                * and will be restored by sigreturn, so we can simply
+                * clear the TS_RESTORE_SIGMASK flag.
+                */
+               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+
+               tracehook_signal_handler(signr, &info, &ka, regs, 0);
                return;
        }
        if (restart_syscall &&
@@ -840,14 +807,12 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
                regs->u_regs[UREG_I0] = orig_i0;
                regs->tpc -= 4;
                regs->tnpc -= 4;
-               pt_regs_clear_syscall(regs);
        }
        if (restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->tpc -= 4;
                regs->tnpc -= 4;
-               pt_regs_clear_syscall(regs);
        }
 
        /* If there's no signal to deliver, we just put the saved sigmask
index 5e5c5fd03783c997f5c344025e8f4784182a0ddc..7ce1a1005b1da4c3b13f8b87927d060376279f2a 100644 (file)
@@ -267,17 +267,15 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
                        sp = current->sas_ss_sp + current->sas_ss_size;
        }
 
-       sp -= framesize;
-
        /* Always align the stack frame.  This handles two cases.  First,
         * sigaltstack need not be mindful of platform specific stack
         * alignment.  Second, if we took this signal because the stack
         * is not aligned properly, we'd like to take the signal cleanly
         * and report that.
         */
-       sp &= ~15UL;
+       sp &= ~7UL;
 
-       return (void __user *) sp;
+       return (void __user *)(sp - framesize);
 }
 
 static inline int
@@ -315,8 +313,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        return err;
 }
 
-static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
-                      int signo, sigset_t *oldset)
+static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
+                       int signo, sigset_t *oldset)
 {
        struct signal_frame __user *sf;
        int sigframe_size, err;
@@ -384,19 +382,16 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
-       return 0;
+       return;
 
 sigill_and_return:
        do_exit(SIGILL);
-       return -EINVAL;
-
 sigsegv:
        force_sigsegv(signo, current);
-       return -EFAULT;
 }
 
-static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
-                         int signo, sigset_t *oldset, siginfo_t *info)
+static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+                          int signo, sigset_t *oldset, siginfo_t *info)
 {
        struct rt_signal_frame __user *sf;
        int sigframe_size;
@@ -469,30 +464,22 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
-       return 0;
+       return;
 
 sigill:
        do_exit(SIGILL);
-       return -EINVAL;
-
 sigsegv:
        force_sigsegv(signo, current);
-       return -EFAULT;
 }
 
-static inline int
+static inline void
 handle_signal(unsigned long signr, struct k_sigaction *ka,
              siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
-       int err;
-
        if (ka->sa.sa_flags & SA_SIGINFO)
-               err = setup_rt_frame(ka, regs, signr, oldset, info);
+               setup_rt_frame(ka, regs, signr, oldset, info);
        else
-               err = setup_frame(ka, regs, signr, oldset);
-
-       if (err)
-               return err;
+               setup_frame(ka, regs, signr, oldset);
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -500,10 +487,6 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
                sigaddset(&current->blocked, signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
-
-       tracehook_signal_handler(signr, info, ka, regs, 0);
-
-       return 0;
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -561,15 +544,17 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-                       /* a signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag.
-                        */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
-               }
+               handle_signal(signr, &ka, &info, oldset, regs);
+
+               /* a signal was successfully delivered; the saved
+                * sigmask will have been stored in the signal frame,
+                * and will be restored by sigreturn, so we can simply
+                * clear the TIF_RESTORE_SIGMASK flag.
+                */
+               if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+               tracehook_signal_handler(signr, &info, &ka, regs, 0);
                return;
        }
        if (restart_syscall &&
@@ -580,14 +565,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                regs->u_regs[UREG_I0] = orig_i0;
                regs->pc -= 4;
                regs->npc -= 4;
-               pt_regs_clear_syscall(regs);
        }
        if (restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->pc -= 4;
                regs->npc -= 4;
-               pt_regs_clear_syscall(regs);
        }
 
        /* if there's no signal to deliver, we just put the saved sigmask
index 006fe4515886dc6ae2a7a8e6cc9b6df9c16fda46..647afbda7ae1f170896675dcc4603dfe5c58ec0b 100644 (file)
@@ -353,7 +353,7 @@ segv:
 /* Checks if the fp is valid */
 static int invalid_frame_pointer(void __user *fp, int fplen)
 {
-       if (((unsigned long) fp) & 15)
+       if (((unsigned long) fp) & 7)
                return 1;
        return 0;
 }
@@ -396,20 +396,18 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
                        sp = current->sas_ss_sp + current->sas_ss_size;
        }
 
-       sp -= framesize;
-
        /* Always align the stack frame.  This handles two cases.  First,
         * sigaltstack need not be mindful of platform specific stack
         * alignment.  Second, if we took this signal because the stack
         * is not aligned properly, we'd like to take the signal cleanly
         * and report that.
         */
-       sp &= ~15UL;
+       sp &= ~7UL;
 
-       return (void __user *) sp;
+       return (void __user *)(sp - framesize);
 }
 
-static inline int
+static inline void
 setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
               int signo, sigset_t *oldset, siginfo_t *info)
 {
@@ -483,37 +481,26 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
        }
        /* 4. return to kernel instructions */
        regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
-       return 0;
+       return;
 
 sigill:
        do_exit(SIGILL);
-       return -EINVAL;
-
 sigsegv:
        force_sigsegv(signo, current);
-       return -EFAULT;
 }
 
-static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
-                               siginfo_t *info,
-                               sigset_t *oldset, struct pt_regs *regs)
+static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
+                                siginfo_t *info,
+                                sigset_t *oldset, struct pt_regs *regs)
 {
-       int err;
-
-       err = setup_rt_frame(ka, regs, signr, oldset,
-                            (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
-       if (err)
-               return err;
+       setup_rt_frame(ka, regs, signr, oldset,
+                      (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NOMASK))
                sigaddset(&current->blocked,signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
-
-       tracehook_signal_handler(signr, info, ka, regs, 0);
-
-       return 0;
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -582,14 +569,16 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
-                       /* A signal was successfully delivered; the saved
-                        * sigmask will have been stored in the signal frame,
-                        * and will be restored by sigreturn, so we can simply
-                        * clear the TS_RESTORE_SIGMASK flag.
-                        */
-                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-               }
+               handle_signal(signr, &ka, &info, oldset, regs);
+
+               /* A signal was successfully delivered; the saved
+                * sigmask will have been stored in the signal frame,
+                * and will be restored by sigreturn, so we can simply
+                * clear the TS_RESTORE_SIGMASK flag.
+                */
+               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+
+               tracehook_signal_handler(signr, &info, &ka, regs, 0);
                return;
        }
        if (restart_syscall &&
@@ -600,14 +589,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                regs->u_regs[UREG_I0] = orig_i0;
                regs->tpc -= 4;
                regs->tnpc -= 4;
-               pt_regs_clear_syscall(regs);
        }
        if (restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->tpc -= 4;
                regs->tnpc -= 4;
-               pt_regs_clear_syscall(regs);
        }
 
        /* If there's no signal to deliver, we just put the saved sigmask
index db15d123f05447f9eac74c5fd168c14084eb25a3..8c91d9b29a2f24023849498a41f440b5ec1e6938 100644 (file)
@@ -191,12 +191,10 @@ tsb_dtlb_load:
 
 tsb_itlb_load:
        /* Executable bit must be set.  */
-661:   sethi           %hi(_PAGE_EXEC_4U), %g4
-       andcc           %g5, %g4, %g0
-       .section        .sun4v_2insn_patch, "ax"
+661:   andcc           %g5, _PAGE_EXEC_4U, %g0
+       .section        .sun4v_1insn_patch, "ax"
        .word           661b
        andcc           %g5, _PAGE_EXEC_4V, %g0
-       nop
        .previous
 
        be,pn           %xcc, tsb_do_fault
index 9c86b4b7d4290b75a5967c28790c59b3ee24b5c9..5f27ad779c0c578097218e44b005eaed770acf7d 100644 (file)
@@ -9,18 +9,18 @@
 #include <asm/thread_info.h>
 
        .text
-       .globl  prom_cif_direct
-prom_cif_direct:
-       sethi   %hi(p1275buf), %o1
-       or      %o1, %lo(p1275buf), %o1
-       ldx     [%o1 + 0x0010], %o2     ! prom_cif_stack
-       save    %o2, -192, %sp
-       ldx     [%i1 + 0x0008], %l2     ! prom_cif_handler
+       .globl  prom_cif_interface
+prom_cif_interface:
+       sethi   %hi(p1275buf), %o0
+       or      %o0, %lo(p1275buf), %o0
+       ldx     [%o0 + 0x010], %o1      ! prom_cif_stack
+       save    %o1, -192, %sp
+       ldx     [%i0 + 0x008], %l2      ! prom_cif_handler
        mov     %g4, %l0
        mov     %g5, %l1
        mov     %g6, %l3
        call    %l2
-        mov    %i0, %o0                ! prom_args
+        add    %i0, 0x018, %o0         ! prom_args
        mov     %l0, %g4
        mov     %l1, %g5
        mov     %l3, %g6
index 7b707b6a3fcaf6162e563bc7f7f6624fa1e047ca..e1c3fc87484dd3978126050e60abc786cba8e0bb 100644 (file)
@@ -21,22 +21,14 @@ extern int prom_stdin, prom_stdout;
 inline int
 prom_nbgetchar(void)
 {
-       unsigned long args[7];
        char inc;
 
-       args[0] = (unsigned long) "read";
-       args[1] = 3;
-       args[2] = 1;
-       args[3] = (unsigned int) prom_stdin;
-       args[4] = (unsigned long) &inc;
-       args[5] = 1;
-       args[6] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       if (args[6] == 1)
+       if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)|
+                             P1275_INOUT(3,1),
+                             prom_stdin, &inc, P1275_SIZE(1)) == 1)
                return inc;
-       return -1;
+       else
+               return -1;
 }
 
 /* Non blocking put character to console device, returns -1 if
@@ -45,22 +37,12 @@ prom_nbgetchar(void)
 inline int
 prom_nbputchar(char c)
 {
-       unsigned long args[7];
        char outc;
        
        outc = c;
-
-       args[0] = (unsigned long) "write";
-       args[1] = 3;
-       args[2] = 1;
-       args[3] = (unsigned int) prom_stdout;
-       args[4] = (unsigned long) &outc;
-       args[5] = 1;
-       args[6] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       if (args[6] == 1)
+       if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
+                              P1275_INOUT(3,1),
+                              prom_stdout, &outc, P1275_SIZE(1)) == 1)
                return 0;
        else
                return -1;
@@ -86,15 +68,7 @@ prom_putchar(char c)
 void
 prom_puts(const char *s, int len)
 {
-       unsigned long args[7];
-
-       args[0] = (unsigned long) "write";
-       args[1] = 3;
-       args[2] = 1;
-       args[3] = (unsigned int) prom_stdout;
-       args[4] = (unsigned long) s;
-       args[5] = len;
-       args[6] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
+       p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
+                          P1275_INOUT(3,1),
+                          prom_stdout, s, P1275_SIZE(len));
 }
index a017119e7ef17c40ee8fa8328252f921c59a6ca8..9dbd803e46e1f4c460300c6f5f2485f3d6c54e7d 100644 (file)
 int
 prom_devopen(const char *dstr)
 {
-       unsigned long args[5];
-
-       args[0] = (unsigned long) "open";
-       args[1] = 1;
-       args[2] = 1;
-       args[3] = (unsigned long) dstr;
-       args[4] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       return (int) args[4];
+       return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
+                                 P1275_INOUT(1,1),
+                                 dstr);
 }
 
 /* Close the device described by device handle 'dhandle'. */
 int
 prom_devclose(int dhandle)
 {
-       unsigned long args[4];
-
-       args[0] = (unsigned long) "close";
-       args[1] = 1;
-       args[2] = 0;
-       args[3] = (unsigned int) dhandle;
-
-       p1275_cmd_direct(args);
-
+       p1275_cmd ("close", P1275_INOUT(1,0), dhandle);
        return 0;
 }
 
@@ -53,15 +37,5 @@ prom_devclose(int dhandle)
 void
 prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
 {
-       unsigned long args[7];
-
-       args[0] = (unsigned long) "seek";
-       args[1] = 3;
-       args[2] = 1;
-       args[3] = (unsigned int) dhandle;
-       args[4] = seekhi;
-       args[5] = seeklo;
-       args[6] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
+       p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo);
 }
index 6cb1581d6aef507fca9c036708cebe24e5ab053c..39fc6af21b7c55ddc0e752c80c745ed6c018820a 100644 (file)
 
 int prom_service_exists(const char *service_name)
 {
-       unsigned long args[5];
+       int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) |
+                           P1275_INOUT(1, 1), service_name);
 
-       args[0] = (unsigned long) "test";
-       args[1] = 1;
-       args[2] = 1;
-       args[3] = (unsigned long) service_name;
-       args[4] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       if (args[4])
+       if (err)
                return 0;
        return 1;
 }
@@ -38,47 +31,30 @@ int prom_service_exists(const char *service_name)
 void prom_sun4v_guest_soft_state(void)
 {
        const char *svc = "SUNW,soft-state-supported";
-       unsigned long args[3];
 
        if (!prom_service_exists(svc))
                return;
-       args[0] = (unsigned long) svc;
-       args[1] = 0;
-       args[2] = 0;
-       p1275_cmd_direct(args);
+       p1275_cmd(svc, P1275_INOUT(0, 0));
 }
 
 /* Reset and reboot the machine with the command 'bcommand'. */
 void prom_reboot(const char *bcommand)
 {
-       unsigned long args[4];
-
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
                ldom_reboot(bcommand);
 #endif
-       args[0] = (unsigned long) "boot";
-       args[1] = 1;
-       args[2] = 0;
-       args[3] = (unsigned long) bcommand;
-
-       p1275_cmd_direct(args);
+       p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
+                 P1275_INOUT(1, 0), bcommand);
 }
 
 /* Forth evaluate the expression contained in 'fstring'. */
 void prom_feval(const char *fstring)
 {
-       unsigned long args[5];
-
        if (!fstring || fstring[0] == 0)
                return;
-       args[0] = (unsigned long) "interpret";
-       args[1] = 1;
-       args[2] = 1;
-       args[3] = (unsigned long) fstring;
-       args[4] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
+       p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) |
+                 P1275_INOUT(1, 1), fstring);
 }
 EXPORT_SYMBOL(prom_feval);
 
@@ -92,7 +68,6 @@ extern void smp_release(void);
  */
 void prom_cmdline(void)
 {
-       unsigned long args[3];
        unsigned long flags;
 
        local_irq_save(flags);
@@ -101,11 +76,7 @@ void prom_cmdline(void)
        smp_capture();
 #endif
 
-       args[0] = (unsigned long) "enter";
-       args[1] = 0;
-       args[2] = 0;
-
-       p1275_cmd_direct(args);
+       p1275_cmd("enter", P1275_INOUT(0, 0));
 
 #ifdef CONFIG_SMP
        smp_release();
@@ -119,32 +90,22 @@ void prom_cmdline(void)
  */
 void notrace prom_halt(void)
 {
-       unsigned long args[3];
-
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
                ldom_power_off();
 #endif
 again:
-       args[0] = (unsigned long) "exit";
-       args[1] = 0;
-       args[2] = 0;
-       p1275_cmd_direct(args);
+       p1275_cmd("exit", P1275_INOUT(0, 0));
        goto again; /* PROM is out to get me -DaveM */
 }
 
 void prom_halt_power_off(void)
 {
-       unsigned long args[3];
-
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
                ldom_power_off();
 #endif
-       args[0] = (unsigned long) "SUNW,power-off";
-       args[1] = 0;
-       args[2] = 0;
-       p1275_cmd_direct(args);
+       p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
 
        /* if nothing else helps, we just halt */
        prom_halt();
@@ -153,15 +114,10 @@ void prom_halt_power_off(void)
 /* Set prom sync handler to call function 'funcp'. */
 void prom_setcallback(callback_func_t funcp)
 {
-       unsigned long args[5];
        if (!funcp)
                return;
-       args[0] = (unsigned long) "set-callback";
-       args[1] = 1;
-       args[2] = 1;
-       args[3] = (unsigned long) funcp;
-       args[4] = (unsigned long) -1;
-       p1275_cmd_direct(args);
+       p1275_cmd("set-callback", P1275_ARG(0, P1275_ARG_IN_FUNCTION) |
+                 P1275_INOUT(1, 1), funcp);
 }
 
 /* Get the idprom and stuff it into buffer 'idbuf'.  Returns the
@@ -217,61 +173,57 @@ static int prom_get_memory_ihandle(void)
 }
 
 /* Load explicit I/D TLB entries. */
-static long tlb_load(const char *type, unsigned long index,
-                    unsigned long tte_data, unsigned long vaddr)
-{
-       unsigned long args[9];
-
-       args[0] = (unsigned long) prom_callmethod_name;
-       args[1] = 5;
-       args[2] = 1;
-       args[3] = (unsigned long) type;
-       args[4] = (unsigned int) prom_get_mmu_ihandle();
-       args[5] = vaddr;
-       args[6] = tte_data;
-       args[7] = index;
-       args[8] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       return (long) args[8];
-}
-
 long prom_itlb_load(unsigned long index,
                    unsigned long tte_data,
                    unsigned long vaddr)
 {
-       return tlb_load("SUNW,itlb-load", index, tte_data, vaddr);
+       return p1275_cmd(prom_callmethod_name,
+                        (P1275_ARG(0, P1275_ARG_IN_STRING) |
+                         P1275_ARG(2, P1275_ARG_IN_64B) |
+                         P1275_ARG(3, P1275_ARG_IN_64B) |
+                         P1275_INOUT(5, 1)),
+                        "SUNW,itlb-load",
+                        prom_get_mmu_ihandle(),
+                        /* And then our actual args are pushed backwards. */
+                        vaddr,
+                        tte_data,
+                        index);
 }
 
 long prom_dtlb_load(unsigned long index,
                    unsigned long tte_data,
                    unsigned long vaddr)
 {
-       return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr);
+       return p1275_cmd(prom_callmethod_name,
+                        (P1275_ARG(0, P1275_ARG_IN_STRING) |
+                         P1275_ARG(2, P1275_ARG_IN_64B) |
+                         P1275_ARG(3, P1275_ARG_IN_64B) |
+                         P1275_INOUT(5, 1)),
+                        "SUNW,dtlb-load",
+                        prom_get_mmu_ihandle(),
+                        /* And then our actual args are pushed backwards. */
+                        vaddr,
+                        tte_data,
+                        index);
 }
 
 int prom_map(int mode, unsigned long size,
             unsigned long vaddr, unsigned long paddr)
 {
-       unsigned long args[11];
-       int ret;
-
-       args[0] = (unsigned long) prom_callmethod_name;
-       args[1] = 7;
-       args[2] = 1;
-       args[3] = (unsigned long) prom_map_name;
-       args[4] = (unsigned int) prom_get_mmu_ihandle();
-       args[5] = (unsigned int) mode;
-       args[6] = size;
-       args[7] = vaddr;
-       args[8] = 0;
-       args[9] = paddr;
-       args[10] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       ret = (int) args[10];
+       int ret = p1275_cmd(prom_callmethod_name,
+                           (P1275_ARG(0, P1275_ARG_IN_STRING) |
+                            P1275_ARG(3, P1275_ARG_IN_64B) |
+                            P1275_ARG(4, P1275_ARG_IN_64B) |
+                            P1275_ARG(6, P1275_ARG_IN_64B) |
+                            P1275_INOUT(7, 1)),
+                           prom_map_name,
+                           prom_get_mmu_ihandle(),
+                           mode,
+                           size,
+                           vaddr,
+                           0,
+                           paddr);
+
        if (ret == 0)
                ret = -1;
        return ret;
@@ -279,51 +231,40 @@ int prom_map(int mode, unsigned long size,
 
 void prom_unmap(unsigned long size, unsigned long vaddr)
 {
-       unsigned long args[7];
-
-       args[0] = (unsigned long) prom_callmethod_name;
-       args[1] = 4;
-       args[2] = 0;
-       args[3] = (unsigned long) prom_unmap_name;
-       args[4] = (unsigned int) prom_get_mmu_ihandle();
-       args[5] = size;
-       args[6] = vaddr;
-
-       p1275_cmd_direct(args);
+       p1275_cmd(prom_callmethod_name,
+                 (P1275_ARG(0, P1275_ARG_IN_STRING) |
+                  P1275_ARG(2, P1275_ARG_IN_64B) |
+                  P1275_ARG(3, P1275_ARG_IN_64B) |
+                  P1275_INOUT(4, 0)),
+                 prom_unmap_name,
+                 prom_get_mmu_ihandle(),
+                 size,
+                 vaddr);
 }
 
 /* Set aside physical memory which is not touched or modified
  * across soft resets.
  */
-int prom_retain(const char *name, unsigned long size,
-               unsigned long align, unsigned long *paddr)
+unsigned long prom_retain(const char *name,
+                         unsigned long pa_low, unsigned long pa_high,
+                         long size, long align)
 {
-       unsigned long args[11];
-
-       args[0] = (unsigned long) prom_callmethod_name;
-       args[1] = 5;
-       args[2] = 3;
-       args[3] = (unsigned long) "SUNW,retain";
-       args[4] = (unsigned int) prom_get_memory_ihandle();
-       args[5] = align;
-       args[6] = size;
-       args[7] = (unsigned long) name;
-       args[8] = (unsigned long) -1;
-       args[9] = (unsigned long) -1;
-       args[10] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       if (args[8])
-               return (int) args[8];
-
-       /* Next we get "phys_high" then "phys_low".  On 64-bit
-        * the phys_high cell is don't care since the phys_low
-        * cell has the full value.
+       /* XXX I don't think we return multiple values correctly.
+        * XXX OBP supposedly returns pa_low/pa_high here, how does
+        * XXX it work?
         */
-       *paddr = args[10];
 
-       return 0;
+       /* If align is zero, the pa_low/pa_high args are passed,
+        * else they are not.
+        */
+       if (align == 0)
+               return p1275_cmd("SUNW,retain",
+                                (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(5, 2)),
+                                name, pa_low, pa_high, size, align);
+       else
+               return p1275_cmd("SUNW,retain",
+                                (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(3, 2)),
+                                name, size, align);
 }
 
 /* Get "Unumber" string for the SIMM at the given
@@ -336,129 +277,62 @@ int prom_getunumber(int syndrome_code,
                    unsigned long phys_addr,
                    char *buf, int buflen)
 {
-       unsigned long args[12];
-
-       args[0] = (unsigned long) prom_callmethod_name;
-       args[1] = 7;
-       args[2] = 2;
-       args[3] = (unsigned long) "SUNW,get-unumber";
-       args[4] = (unsigned int) prom_get_memory_ihandle();
-       args[5] = buflen;
-       args[6] = (unsigned long) buf;
-       args[7] = 0;
-       args[8] = phys_addr;
-       args[9] = (unsigned int) syndrome_code;
-       args[10] = (unsigned long) -1;
-       args[11] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       return (int) args[10];
+       return p1275_cmd(prom_callmethod_name,
+                        (P1275_ARG(0, P1275_ARG_IN_STRING)     |
+                         P1275_ARG(3, P1275_ARG_OUT_BUF)       |
+                         P1275_ARG(6, P1275_ARG_IN_64B)        |
+                         P1275_INOUT(8, 2)),
+                        "SUNW,get-unumber", prom_get_memory_ihandle(),
+                        buflen, buf, P1275_SIZE(buflen),
+                        0, phys_addr, syndrome_code);
 }
 
 /* Power management extensions. */
 void prom_sleepself(void)
 {
-       unsigned long args[3];
-
-       args[0] = (unsigned long) "SUNW,sleep-self";
-       args[1] = 0;
-       args[2] = 0;
-       p1275_cmd_direct(args);
+       p1275_cmd("SUNW,sleep-self", P1275_INOUT(0, 0));
 }
 
 int prom_sleepsystem(void)
 {
-       unsigned long args[4];
-
-       args[0] = (unsigned long) "SUNW,sleep-system";
-       args[1] = 0;
-       args[2] = 1;
-       args[3] = (unsigned long) -1;
-       p1275_cmd_direct(args);
-
-       return (int) args[3];
+       return p1275_cmd("SUNW,sleep-system", P1275_INOUT(0, 1));
 }
 
 int prom_wakeupsystem(void)
 {
-       unsigned long args[4];
-
-       args[0] = (unsigned long) "SUNW,wakeup-system";
-       args[1] = 0;
-       args[2] = 1;
-       args[3] = (unsigned long) -1;
-       p1275_cmd_direct(args);
-
-       return (int) args[3];
+       return p1275_cmd("SUNW,wakeup-system", P1275_INOUT(0, 1));
 }
 
 #ifdef CONFIG_SMP
 void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg)
 {
-       unsigned long args[6];
-
-       args[0] = (unsigned long) "SUNW,start-cpu";
-       args[1] = 3;
-       args[2] = 0;
-       args[3] = (unsigned int) cpunode;
-       args[4] = pc;
-       args[5] = arg;
-       p1275_cmd_direct(args);
+       p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg);
 }
 
 void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg)
 {
-       unsigned long args[6];
-
-       args[0] = (unsigned long) "SUNW,start-cpu-by-cpuid";
-       args[1] = 3;
-       args[2] = 0;
-       args[3] = (unsigned int) cpuid;
-       args[4] = pc;
-       args[5] = arg;
-       p1275_cmd_direct(args);
+       p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0),
+                 cpuid, pc, arg);
 }
 
 void prom_stopcpu_cpuid(int cpuid)
 {
-       unsigned long args[4];
-
-       args[0] = (unsigned long) "SUNW,stop-cpu-by-cpuid";
-       args[1] = 1;
-       args[2] = 0;
-       args[3] = (unsigned int) cpuid;
-       p1275_cmd_direct(args);
+       p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0),
+                 cpuid);
 }
 
 void prom_stopself(void)
 {
-       unsigned long args[3];
-
-       args[0] = (unsigned long) "SUNW,stop-self";
-       args[1] = 0;
-       args[2] = 0;
-       p1275_cmd_direct(args);
+       p1275_cmd("SUNW,stop-self", P1275_INOUT(0, 0));
 }
 
 void prom_idleself(void)
 {
-       unsigned long args[3];
-
-       args[0] = (unsigned long) "SUNW,idle-self";
-       args[1] = 0;
-       args[2] = 0;
-       p1275_cmd_direct(args);
+       p1275_cmd("SUNW,idle-self", P1275_INOUT(0, 0));
 }
 
 void prom_resumecpu(int cpunode)
 {
-       unsigned long args[4];
-
-       args[0] = (unsigned long) "SUNW,resume-cpu";
-       args[1] = 1;
-       args[2] = 0;
-       args[3] = (unsigned int) cpunode;
-       p1275_cmd_direct(args);
+       p1275_cmd("SUNW,resume-cpu", P1275_INOUT(1, 0), cpunode);
 }
 #endif
index 7ae5b5408d7c2103d7164f606d59678147e91304..4b7c937bba61704cf386e76c7907bbdcf1bcb759 100644 (file)
@@ -22,32 +22,126 @@ struct {
        long prom_callback;                     /* 0x00 */
        void (*prom_cif_handler)(long *);       /* 0x08 */
        unsigned long prom_cif_stack;           /* 0x10 */
+       unsigned long prom_args [23];           /* 0x18 */
+       char prom_buffer [3000];
 } p1275buf;
 
 extern void prom_world(int);
 
-extern void prom_cif_direct(unsigned long *args);
+extern void prom_cif_interface(void);
 extern void prom_cif_callback(void);
 
 /*
- * This provides SMP safety on the p1275buf.
+ * This provides SMP safety on the p1275buf. prom_callback() drops this lock
+ * to allow recursuve acquisition.
  */
 DEFINE_SPINLOCK(prom_entry_lock);
 
-void p1275_cmd_direct(unsigned long *args)
+long p1275_cmd(const char *service, long fmt, ...)
 {
+       char *p, *q;
        unsigned long flags;
+       int nargs, nrets, i;
+       va_list list;
+       long attrs, x;
+       
+       p = p1275buf.prom_buffer;
 
-       raw_local_save_flags(flags);
-       raw_local_irq_restore(PIL_NMI);
-       spin_lock(&prom_entry_lock);
+       spin_lock_irqsave(&prom_entry_lock, flags);
+
+       p1275buf.prom_args[0] = (unsigned long)p;               /* service */
+       strcpy (p, service);
+       p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
+       p1275buf.prom_args[1] = nargs = (fmt & 0x0f);           /* nargs */
+       p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4);    /* nrets */
+       attrs = fmt >> 8;
+       va_start(list, fmt);
+       for (i = 0; i < nargs; i++, attrs >>= 3) {
+               switch (attrs & 0x7) {
+               case P1275_ARG_NUMBER:
+                       p1275buf.prom_args[i + 3] =
+                                               (unsigned)va_arg(list, long);
+                       break;
+               case P1275_ARG_IN_64B:
+                       p1275buf.prom_args[i + 3] =
+                               va_arg(list, unsigned long);
+                       break;
+               case P1275_ARG_IN_STRING:
+                       strcpy (p, va_arg(list, char *));
+                       p1275buf.prom_args[i + 3] = (unsigned long)p;
+                       p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
+                       break;
+               case P1275_ARG_OUT_BUF:
+                       (void) va_arg(list, char *);
+                       p1275buf.prom_args[i + 3] = (unsigned long)p;
+                       x = va_arg(list, long);
+                       i++; attrs >>= 3;
+                       p = (char *)(((long)(p + (int)x + 7)) & ~7);
+                       p1275buf.prom_args[i + 3] = x;
+                       break;
+               case P1275_ARG_IN_BUF:
+                       q = va_arg(list, char *);
+                       p1275buf.prom_args[i + 3] = (unsigned long)p;
+                       x = va_arg(list, long);
+                       i++; attrs >>= 3;
+                       memcpy (p, q, (int)x);
+                       p = (char *)(((long)(p + (int)x + 7)) & ~7);
+                       p1275buf.prom_args[i + 3] = x;
+                       break;
+               case P1275_ARG_OUT_32B:
+                       (void) va_arg(list, char *);
+                       p1275buf.prom_args[i + 3] = (unsigned long)p;
+                       p += 32;
+                       break;
+               case P1275_ARG_IN_FUNCTION:
+                       p1275buf.prom_args[i + 3] =
+                                       (unsigned long)prom_cif_callback;
+                       p1275buf.prom_callback = va_arg(list, long);
+                       break;
+               }
+       }
+       va_end(list);
 
        prom_world(1);
-       prom_cif_direct(args);
+       prom_cif_interface();
        prom_world(0);
 
-       spin_unlock(&prom_entry_lock);
-       raw_local_irq_restore(flags);
+       attrs = fmt >> 8;
+       va_start(list, fmt);
+       for (i = 0; i < nargs; i++, attrs >>= 3) {
+               switch (attrs & 0x7) {
+               case P1275_ARG_NUMBER:
+                       (void) va_arg(list, long);
+                       break;
+               case P1275_ARG_IN_STRING:
+                       (void) va_arg(list, char *);
+                       break;
+               case P1275_ARG_IN_FUNCTION:
+                       (void) va_arg(list, long);
+                       break;
+               case P1275_ARG_IN_BUF:
+                       (void) va_arg(list, char *);
+                       (void) va_arg(list, long);
+                       i++; attrs >>= 3;
+                       break;
+               case P1275_ARG_OUT_BUF:
+                       p = va_arg(list, char *);
+                       x = va_arg(list, long);
+                       memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x);
+                       i++; attrs >>= 3;
+                       break;
+               case P1275_ARG_OUT_32B:
+                       p = va_arg(list, char *);
+                       memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32);
+                       break;
+               }
+       }
+       va_end(list);
+       x = p1275buf.prom_args [nargs + 3];
+
+       spin_unlock_irqrestore(&prom_entry_lock, flags);
+
+       return x;
 }
 
 void prom_cif_init(void *cif_handler, void *cif_stack)
index 6a05c76f58fdec24c135f6734ec5aaecc971b0a3..8ea73ddc61dcb6fc69e62a235e98e5066fee29b8 100644 (file)
 #include <asm/oplib.h>
 #include <asm/ldc.h>
 
-static int prom_node_to_node(const char *type, int node)
-{
-       unsigned long args[5];
-
-       args[0] = (unsigned long) type;
-       args[1] = 1;
-       args[2] = 1;
-       args[3] = (unsigned int) node;
-       args[4] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       return (int) args[4];
-}
-
 /* Return the child of node 'node' or zero if no this node has no
  * direct descendent.
  */
 inline int __prom_getchild(int node)
 {
-       return prom_node_to_node("child", node);
+       return p1275_cmd ("child", P1275_INOUT(1, 1), node);
 }
 
 inline int prom_getchild(int node)
 {
        int cnode;
 
-       if (node == -1)
-               return 0;
+       if(node == -1) return 0;
        cnode = __prom_getchild(node);
-       if (cnode == -1)
-               return 0;
-       return cnode;
+       if(cnode == -1) return 0;
+       return (int)cnode;
 }
 EXPORT_SYMBOL(prom_getchild);
 
@@ -56,12 +39,10 @@ inline int prom_getparent(int node)
 {
        int cnode;
 
-       if (node == -1)
-               return 0;
-       cnode = prom_node_to_node("parent", node);
-       if (cnode == -1)
-               return 0;
-       return cnode;
+       if(node == -1) return 0;
+       cnode = p1275_cmd ("parent", P1275_INOUT(1, 1), node);
+       if(cnode == -1) return 0;
+       return (int)cnode;
 }
 
 /* Return the next sibling of node 'node' or zero if no more siblings
@@ -69,7 +50,7 @@ inline int prom_getparent(int node)
  */
 inline int __prom_getsibling(int node)
 {
-       return prom_node_to_node(prom_peer_name, node);
+       return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node);
 }
 
 inline int prom_getsibling(int node)
@@ -91,21 +72,11 @@ EXPORT_SYMBOL(prom_getsibling);
  */
 inline int prom_getproplen(int node, const char *prop)
 {
-       unsigned long args[6];
-
-       if (!node || !prop)
-               return -1;
-
-       args[0] = (unsigned long) "getproplen";
-       args[1] = 2;
-       args[2] = 1;
-       args[3] = (unsigned int) node;
-       args[4] = (unsigned long) prop;
-       args[5] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       return (int) args[5];
+       if((!node) || (!prop)) return -1;
+       return p1275_cmd ("getproplen", 
+                         P1275_ARG(1,P1275_ARG_IN_STRING)|
+                         P1275_INOUT(2, 1), 
+                         node, prop);
 }
 EXPORT_SYMBOL(prom_getproplen);
 
@@ -116,25 +87,19 @@ EXPORT_SYMBOL(prom_getproplen);
 inline int prom_getproperty(int node, const char *prop,
                            char *buffer, int bufsize)
 {
-       unsigned long args[8];
        int plen;
 
        plen = prom_getproplen(node, prop);
-       if ((plen > bufsize) || (plen == 0) || (plen == -1))
+       if ((plen > bufsize) || (plen == 0) || (plen == -1)) {
                return -1;
-
-       args[0] = (unsigned long) prom_getprop_name;
-       args[1] = 4;
-       args[2] = 1;
-       args[3] = (unsigned int) node;
-       args[4] = (unsigned long) prop;
-       args[5] = (unsigned long) buffer;
-       args[6] = bufsize;
-       args[7] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       return (int) args[7];
+       } else {
+               /* Ok, things seem all right. */
+               return p1275_cmd(prom_getprop_name, 
+                                P1275_ARG(1,P1275_ARG_IN_STRING)|
+                                P1275_ARG(2,P1275_ARG_OUT_BUF)|
+                                P1275_INOUT(4, 1), 
+                                node, prop, buffer, P1275_SIZE(plen));
+       }
 }
 EXPORT_SYMBOL(prom_getproperty);
 
@@ -145,7 +110,7 @@ inline int prom_getint(int node, const char *prop)
 {
        int intprop;
 
-       if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
+       if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
                return intprop;
 
        return -1;
@@ -161,8 +126,7 @@ int prom_getintdefault(int node, const char *property, int deflt)
        int retval;
 
        retval = prom_getint(node, property);
-       if (retval == -1)
-               return deflt;
+       if(retval == -1) return deflt;
 
        return retval;
 }
@@ -174,8 +138,7 @@ int prom_getbool(int node, const char *prop)
        int retval;
 
        retval = prom_getproplen(node, prop);
-       if (retval == -1)
-               return 0;
+       if(retval == -1) return 0;
        return 1;
 }
 EXPORT_SYMBOL(prom_getbool);
@@ -189,8 +152,7 @@ void prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size)
        int len;
 
        len = prom_getproperty(node, prop, user_buf, ubuf_size);
-       if (len != -1)
-               return;
+       if(len != -1) return;
        user_buf[0] = 0;
        return;
 }
@@ -203,8 +165,7 @@ int prom_nodematch(int node, const char *name)
 {
        char namebuf[128];
        prom_getproperty(node, "name", namebuf, sizeof(namebuf));
-       if (strcmp(namebuf, name) == 0)
-               return 1;
+       if(strcmp(namebuf, name) == 0) return 1;
        return 0;
 }
 
@@ -230,29 +191,16 @@ int prom_searchsiblings(int node_start, const char *nodename)
 }
 EXPORT_SYMBOL(prom_searchsiblings);
 
-static const char *prom_nextprop_name = "nextprop";
-
 /* Return the first property type for node 'node'.
  * buffer should be at least 32B in length
  */
 inline char *prom_firstprop(int node, char *buffer)
 {
-       unsigned long args[7];
-
        *buffer = 0;
-       if (node == -1)
-               return buffer;
-
-       args[0] = (unsigned long) prom_nextprop_name;
-       args[1] = 3;
-       args[2] = 1;
-       args[3] = (unsigned int) node;
-       args[4] = 0;
-       args[5] = (unsigned long) buffer;
-       args[6] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
+       if(node == -1) return buffer;
+       p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)|
+                              P1275_INOUT(3, 0), 
+                              node, (char *) 0x0, buffer);
        return buffer;
 }
 EXPORT_SYMBOL(prom_firstprop);
@@ -263,10 +211,9 @@ EXPORT_SYMBOL(prom_firstprop);
  */
 inline char *prom_nextprop(int node, const char *oprop, char *buffer)
 {
-       unsigned long args[7];
        char buf[32];
 
-       if (node == -1) {
+       if(node == -1) {
                *buffer = 0;
                return buffer;
        }
@@ -274,17 +221,10 @@ inline char *prom_nextprop(int node, const char *oprop, char *buffer)
                strcpy (buf, oprop);
                oprop = buf;
        }
-
-       args[0] = (unsigned long) prom_nextprop_name;
-       args[1] = 3;
-       args[2] = 1;
-       args[3] = (unsigned int) node;
-       args[4] = (unsigned long) oprop;
-       args[5] = (unsigned long) buffer;
-       args[6] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
+       p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
+                                   P1275_ARG(2,P1275_ARG_OUT_32B)|
+                                   P1275_INOUT(3, 0), 
+                                   node, oprop, buffer); 
        return buffer;
 }
 EXPORT_SYMBOL(prom_nextprop);
@@ -292,19 +232,12 @@ EXPORT_SYMBOL(prom_nextprop);
 int
 prom_finddevice(const char *name)
 {
-       unsigned long args[5];
-
        if (!name)
                return 0;
-       args[0] = (unsigned long) "finddevice";
-       args[1] = 1;
-       args[2] = 1;
-       args[3] = (unsigned long) name;
-       args[4] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       return (int) args[4];
+       return p1275_cmd(prom_finddev_name,
+                        P1275_ARG(0,P1275_ARG_IN_STRING)|
+                        P1275_INOUT(1, 1), 
+                        name);
 }
 EXPORT_SYMBOL(prom_finddevice);
 
@@ -315,7 +248,7 @@ int prom_node_has_property(int node, const char *prop)
        *buf = 0;
        do {
                prom_nextprop(node, buf, buf);
-               if (!strcmp(buf, prop))
+               if(!strcmp(buf, prop))
                        return 1;
        } while (*buf);
        return 0;
@@ -328,8 +261,6 @@ EXPORT_SYMBOL(prom_node_has_property);
 int
 prom_setprop(int node, const char *pname, char *value, int size)
 {
-       unsigned long args[8];
-
        if (size == 0)
                return 0;
        if ((pname == 0) || (value == 0))
@@ -341,37 +272,19 @@ prom_setprop(int node, const char *pname, char *value, int size)
                return 0;
        }
 #endif
-       args[0] = (unsigned long) "setprop";
-       args[1] = 4;
-       args[2] = 1;
-       args[3] = (unsigned int) node;
-       args[4] = (unsigned long) pname;
-       args[5] = (unsigned long) value;
-       args[6] = size;
-       args[7] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       return (int) args[7];
+       return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
+                                         P1275_ARG(2,P1275_ARG_IN_BUF)|
+                                         P1275_INOUT(4, 1), 
+                                         node, pname, value, P1275_SIZE(size));
 }
 EXPORT_SYMBOL(prom_setprop);
 
 inline int prom_inst2pkg(int inst)
 {
-       unsigned long args[5];
        int node;
        
-       args[0] = (unsigned long) "instance-to-package";
-       args[1] = 1;
-       args[2] = 1;
-       args[3] = (unsigned int) inst;
-       args[4] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       node = (int) args[4];
-       if (node == -1)
-               return 0;
+       node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst);
+       if (node == -1) return 0;
        return node;
 }
 
@@ -384,28 +297,17 @@ prom_pathtoinode(const char *path)
        int node, inst;
 
        inst = prom_devopen (path);
-       if (inst == 0)
-               return 0;
-       node = prom_inst2pkg(inst);
-       prom_devclose(inst);
-       if (node == -1)
-               return 0;
+       if (inst == 0) return 0;
+       node = prom_inst2pkg (inst);
+       prom_devclose (inst);
+       if (node == -1) return 0;
        return node;
 }
 
 int prom_ihandle2path(int handle, char *buffer, int bufsize)
 {
-       unsigned long args[7];
-
-       args[0] = (unsigned long) "instance-to-path";
-       args[1] = 3;
-       args[2] = 1;
-       args[3] = (unsigned int) handle;
-       args[4] = (unsigned long) buffer;
-       args[5] = bufsize;
-       args[6] = (unsigned long) -1;
-
-       p1275_cmd_direct(args);
-
-       return (int) args[6];
+       return p1275_cmd("instance-to-path",
+                        P1275_ARG(1,P1275_ARG_OUT_BUF)|
+                        P1275_INOUT(3, 1),
+                        handle, buffer, P1275_SIZE(bufsize));
 }
index ec8a0eea13f707b1579f54b4846b1ed01644f17d..cf8a97f3451856665b11fdb6172b67953d6ee513 100644 (file)
@@ -727,9 +727,6 @@ struct winch {
 
 static void free_winch(struct winch *winch, int free_irq_ok)
 {
-       if (free_irq_ok)
-               free_irq(WINCH_IRQ, winch);
-
        list_del(&winch->list);
 
        if (winch->pid != -1)
@@ -738,6 +735,8 @@ static void free_winch(struct winch *winch, int free_irq_ok)
                os_close_file(winch->fd);
        if (winch->stack != 0)
                free_stack(winch->stack, 0);
+       if (free_irq_ok)
+               free_irq(WINCH_IRQ, winch);
        kfree(winch);
 }
 
index 9fcf26c5216ee6595efcb600591b5c80dc204894..635d16d90a80d4158df7769ec11df5ab6c64c38d 100644 (file)
@@ -160,7 +160,6 @@ struct ubd {
        struct scatterlist sg[MAX_SG];
        struct request *request;
        int start_sg, end_sg;
-       sector_t rq_pos;
 };
 
 #define DEFAULT_COW { \
@@ -185,7 +184,6 @@ struct ubd {
        .request =              NULL, \
        .start_sg =             0, \
        .end_sg =               0, \
-       .rq_pos =               0, \
 }
 
 /* Protected by ubd_lock */
@@ -1224,6 +1222,7 @@ static void do_ubd_request(struct request_queue *q)
 {
        struct io_thread_req *io_req;
        struct request *req;
+       sector_t sector;
        int n;
 
        while(1){
@@ -1234,12 +1233,12 @@ static void do_ubd_request(struct request_queue *q)
                                return;
 
                        dev->request = req;
-                       dev->rq_pos = blk_rq_pos(req);
                        dev->start_sg = 0;
                        dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
                }
 
                req = dev->request;
+               sector = blk_rq_pos(req);
                while(dev->start_sg < dev->end_sg){
                        struct scatterlist *sg = &dev->sg[dev->start_sg];
 
@@ -1251,9 +1250,10 @@ static void do_ubd_request(struct request_queue *q)
                                return;
                        }
                        prepare_request(req, io_req,
-                                       (unsigned long long)dev->rq_pos << 9,
+                                       (unsigned long long)sector << 9,
                                        sg->offset, sg->length, sg_page(sg));
 
+                       sector += sg->length >> 9;
                        n = os_write_file(thread_fd, &io_req,
                                          sizeof(struct io_thread_req *));
                        if(n != sizeof(struct io_thread_req *)){
@@ -1266,7 +1266,6 @@ static void do_ubd_request(struct request_queue *q)
                                return;
                        }
 
-                       dev->rq_pos += sg->length >> 9;
                        dev->start_sg++;
                }
                dev->end_sg = 0;
index 664f94216eb739cdb799b4396f30a0d96d3dc0f4..e7a6cca667aa5c0a6ec0a8df5392976d8d97aea9 100644 (file)
@@ -22,7 +22,7 @@ SECTIONS
   _text = .;
   _stext = .;
   __init_begin = .;
-  INIT_TEXT_SECTION(0)
+  INIT_TEXT_SECTION(PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
 
   .text      :
index 6e3359d6a8394c0d2f811a28a24f41e673e8793c..dec5678fc17f7b65ef5b1113d8f6246f910bab79 100644 (file)
@@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv)
 long long disable_timer(void)
 {
        struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
-       long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
+       int remain, max = UM_NSEC_PER_SEC / UM_HZ;
 
        if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
                printk(UM_KERN_ERR "disable_timer - setitimer failed, "
index c1ea9eb04466b00a35c8a895d09887546e78abfc..2201e9c20e4a85ec4673939f27e15a3ef3431f94 100644 (file)
@@ -8,8 +8,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
        setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
        sysrq.o ksyms.o tls.o
 
-subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
-               lib/rwsem_64.o
+subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
 subarch-obj-$(CONFIG_MODULES) += kernel/module.o
 
 ldt-y = ../sys-i386/ldt.o
index cb5a57c610751cc58692cc80d1c8b24a29c7182b..4fdb669e84692a53dc07c2b21494ec18bd444e8d 100644 (file)
@@ -227,11 +227,6 @@ config X86_32_LAZY_GS
 
 config KTIME_SCALAR
        def_bool X86_32
-
-config ARCH_CPU_PROBE_RELEASE
-       def_bool y
-       depends on HOTPLUG_CPU
-
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -627,7 +622,7 @@ config GART_IOMMU
        bool "GART IOMMU support" if EMBEDDED
        default y
        select SWIOTLB
-       depends on X86_64 && PCI && K8_NB
+       depends on X86_64 && PCI
        ---help---
          Support for full DMA access of devices with 32bit memory access only
          on systems with more than 3GB. This is usually needed for USB,
@@ -1241,11 +1236,6 @@ config ARCH_MEMORY_PROBE
        def_bool X86_64
        depends on MEMORY_HOTPLUG
 
-config ILLEGAL_POINTER_VALUE
-       hex
-       default 0 if X86_32
-       default 0xdead000000000000 if X86_64
-
 source "mm/Kconfig"
 
 config HIGHPTE
@@ -2032,7 +2022,7 @@ endif # X86_32
 
 config K8_NB
        def_bool y
-       depends on CPU_SUP_AMD && PCI
+       depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
 
 source "drivers/pcmcia/Kconfig"
 
index 0e56610335655d8d379105bb3f6d50060ffd2efa..f2824fb8c79cad23ad747977e1e32ab4d6b555a1 100644 (file)
@@ -323,7 +323,7 @@ config X86_L1_CACHE_SHIFT
 
 config X86_XADD
        def_bool y
-       depends on X86_64 || !M386
+       depends on X86_32 && !M386
 
 config X86_PPRO_FENCE
        bool "PentiumPro memory ordering errata workaround"
index 14531abdd0ced75cc2f5a60e9ff29fe0308a91b8..f9f472462753c1dd41f1a4a3087769dfdc0af6ec 100644 (file)
@@ -327,6 +327,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        current->mm->free_area_cache = TASK_UNMAPPED_BASE;
        current->mm->cached_hole_size = 0;
 
+       current->mm->mmap = NULL;
        install_exec_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
 
index 4edd8eb425cfe4ea29193e4d5afa6657ca743063..5294d84467328f5bb859dcaa2ec84483c355279a 100644 (file)
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * We don't reload %eax because syscall_trace_enter() returned
-        * the %rax value we should see.  Instead, we just truncate that
-        * value to 32 bits again as we did on entry from user mode.
-        * If it's a new value set by user_regset during entry tracing,
-        * this matches the normal truncation of the user-mode value.
-        * If it's -1 to make us punt the syscall, then (u32)-1 is still
-        * an appropriately invalid value.
+        * the value it wants us to use in the table lookup.
         */
        .macro LOAD_ARGS32 offset, _r9=0
        .if \_r9
@@ -65,7 +60,6 @@
        movl \offset+48(%rsp),%edx
        movl \offset+56(%rsp),%esi
        movl \offset+64(%rsp),%edi
-       movl %eax,%eax                  /* zero extension */
        .endm
        
        .macro CFI_STARTPROC32 simple
@@ -159,7 +153,7 @@ ENTRY(ia32_sysenter_target)
        testl  $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
        CFI_REMEMBER_STATE
        jnz  sysenter_tracesys
-       cmpq    $(IA32_NR_syscalls-1),%rax
+       cmpl    $(IA32_NR_syscalls-1),%eax
        ja      ia32_badsys
 sysenter_do_call:
        IA32_ARG_FIXUP
@@ -201,7 +195,7 @@ sysexit_from_sys_call:
        movl $AUDIT_ARCH_I386,%edi      /* 1st arg: audit arch */
        call audit_syscall_entry
        movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall number */
-       cmpq $(IA32_NR_syscalls-1),%rax
+       cmpl $(IA32_NR_syscalls-1),%eax
        ja ia32_badsys
        movl %ebx,%edi                  /* reload 1st syscall arg */
        movl RCX-ARGOFFSET(%rsp),%esi   /* reload 2nd syscall arg */
@@ -254,7 +248,7 @@ sysenter_tracesys:
        call    syscall_trace_enter
        LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
        RESTORE_REST
-       cmpq    $(IA32_NR_syscalls-1),%rax
+       cmpl    $(IA32_NR_syscalls-1),%eax
        ja      int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
        jmp     sysenter_do_call
        CFI_ENDPROC
@@ -320,7 +314,7 @@ ENTRY(ia32_cstar_target)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
        CFI_REMEMBER_STATE
        jnz   cstar_tracesys
-       cmpq $IA32_NR_syscalls-1,%rax
+       cmpl $IA32_NR_syscalls-1,%eax
        ja  ia32_badsys
 cstar_do_call:
        IA32_ARG_FIXUP 1
@@ -373,7 +367,7 @@ cstar_tracesys:
        LOAD_ARGS32 ARGOFFSET, 1  /* reload args from stack in case ptrace changed it */
        RESTORE_REST
        xchgl %ebp,%r9d
-       cmpq $(IA32_NR_syscalls-1),%rax
+       cmpl $(IA32_NR_syscalls-1),%eax
        ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
        jmp cstar_do_call
 END(ia32_cstar_target)
@@ -431,7 +425,7 @@ ENTRY(ia32_syscall)
        orl   $TS_COMPAT,TI_status(%r10)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
        jnz ia32_tracesys
-       cmpq $(IA32_NR_syscalls-1),%rax
+       cmpl $(IA32_NR_syscalls-1),%eax
        ja ia32_badsys
 ia32_do_call:
        IA32_ARG_FIXUP
@@ -450,7 +444,7 @@ ia32_tracesys:
        call syscall_trace_enter
        LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
        RESTORE_REST
-       cmpq $(IA32_NR_syscalls-1),%rax
+       cmpl $(IA32_NR_syscalls-1),%eax
        ja  int_ret_from_sys_call       /* ia32_tracesys has set RAX(%rsp) */
        jmp ia32_do_call
 END(ia32_syscall)
index 7beb491de20ef70d5bfb37a66fb4d51088ec5e23..2a2cc7a78a81b6b06d460dfee51ad8e9093eb808 100644 (file)
@@ -305,9 +305,6 @@ struct amd_iommu {
        /* capabilities of that IOMMU read from ACPI */
        u32 cap;
 
-       /* flags read from acpi table */
-       u8 acpi_flags;
-
        /*
         * Capability pointer. There could be more than one IOMMU per PCI
         * device function if there are more than one AMD IOMMU capability
@@ -351,15 +348,6 @@ struct amd_iommu {
 
        /* default dma_ops domain for that IOMMU */
        struct dma_ops_domain *default_dom;
-
-       /*
-        * This array is required to work around a potential BIOS bug.
-        * The BIOS may miss to restore parts of the PCI configuration
-        * space when the system resumes from S3. The result is that the
-        * IOMMU does not execute commands anymore which leads to system
-        * failure.
-        */
-       u32 cache_cfg[4];
 };
 
 /*
@@ -481,10 +469,4 @@ static inline void amd_iommu_stats_init(void) { }
 /* some function prototypes */
 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
 
-static inline bool is_rd890_iommu(struct pci_dev *pdev)
-{
-       return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
-              (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
-}
-
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
index 9873a5f64676cd25098797b01ac59d123d50580a..ee1931be6593aba1d344b7644340a1422d7a4f6d 100644 (file)
@@ -17,33 +17,60 @@ struct __xchg_dummy {
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
 /*
- * CMPXCHG8B only writes to the target if we had the previous
- * value in registers, otherwise it acts as a read and gives us the
- * "new previous" value.  That is why there is a loop.  Preloading
- * EDX:EAX is a performance optimization: in the common case it means
- * we need only one locked operation.
+ * The semantics of XCHGCMP8B are a bit strange, this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
  *
- * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
- * least an FPU save and/or %cr0.ts manipulation.
- *
- * cmpxchg8b must be used with the lock prefix here to allow the
- * instruction to be executed atomically.  We need to have the reader
- * side to see the coherent 64bit value.
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
  */
-static inline void set_64bit(volatile u64 *ptr, u64 value)
+static inline void __set_64bit(unsigned long long *ptr,
+                              unsigned int low, unsigned int high)
 {
-       u32 low  = value;
-       u32 high = value >> 32;
-       u64 prev = *ptr;
-
        asm volatile("\n1:\t"
-                    LOCK_PREFIX "cmpxchg8b %0\n\t"
+                    "movl (%0), %%eax\n\t"
+                    "movl 4(%0), %%edx\n\t"
+                    LOCK_PREFIX "cmpxchg8b (%0)\n\t"
                     "jnz 1b"
-                    : "=m" (*ptr), "+A" (prev)
-                    : "b" (low), "c" (high)
-                    : "memory");
+                    : /* no outputs */
+                    : "D"(ptr),
+                      "b"(low),
+                      "c"(high)
+                    : "ax", "dx", "memory");
+}
+
+static inline void __set_64bit_constant(unsigned long long *ptr,
+                                       unsigned long long value)
+{
+       __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
 }
 
+#define ll_low(x)      *(((unsigned int *)&(x)) + 0)
+#define ll_high(x)     *(((unsigned int *)&(x)) + 1)
+
+static inline void __set_64bit_var(unsigned long long *ptr,
+                                  unsigned long long value)
+{
+       __set_64bit(ptr, ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr, value)                  \
+       (__builtin_constant_p((value))          \
+        ? __set_64bit_constant((ptr), (value)) \
+        : __set_64bit_var((ptr), (value)))
+
+#define _set_64bit(ptr, value)                                         \
+       (__builtin_constant_p(value)                                    \
+        ? __set_64bit(ptr, (unsigned int)(value),                      \
+                      (unsigned int)((value) >> 32))                   \
+        : __set_64bit(ptr, ll_low((value)), ll_high((value))))
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -55,20 +82,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
        switch (size) {
        case 1:
                asm volatile("xchgb %b0,%1"
-                            : "=q" (x), "+m" (*__xg(ptr))
-                            : "0" (x)
+                            : "=q" (x)
+                            : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 2:
                asm volatile("xchgw %w0,%1"
-                            : "=r" (x), "+m" (*__xg(ptr))
-                            : "0" (x)
+                            : "=r" (x)
+                            : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 4:
                asm volatile("xchgl %0,%1"
-                            : "=r" (x), "+m" (*__xg(ptr))
-                            : "0" (x)
+                            : "=r" (x)
+                            : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        }
@@ -112,21 +139,21 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        unsigned long prev;
        switch (size) {
        case 1:
-               asm volatile(LOCK_PREFIX "cmpxchgb %b2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "q"(new), "0"(old)
+               asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+                            : "=a"(prev)
+                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
-               asm volatile(LOCK_PREFIX "cmpxchgw %w2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
-               asm volatile(LOCK_PREFIX "cmpxchgl %2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
@@ -145,21 +172,21 @@ static inline unsigned long __sync_cmpxchg(volatile void *ptr,
        unsigned long prev;
        switch (size) {
        case 1:
-               asm volatile("lock; cmpxchgb %b2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "q"(new), "0"(old)
+               asm volatile("lock; cmpxchgb %b1,%2"
+                            : "=a"(prev)
+                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
-               asm volatile("lock; cmpxchgw %w2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile("lock; cmpxchgw %w1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
-               asm volatile("lock; cmpxchgl %2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile("lock; cmpxchgl %1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
@@ -173,21 +200,21 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
        unsigned long prev;
        switch (size) {
        case 1:
-               asm volatile("cmpxchgb %b2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "q"(new), "0"(old)
+               asm volatile("cmpxchgb %b1,%2"
+                            : "=a"(prev)
+                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
-               asm volatile("cmpxchgw %w2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile("cmpxchgw %w1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
-               asm volatile("cmpxchgl %2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile("cmpxchgl %1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
@@ -199,10 +226,11 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr,
                                             unsigned long long new)
 {
        unsigned long long prev;
-       asm volatile(LOCK_PREFIX "cmpxchg8b %1"
-                    : "=A"(prev), "+m" (*__xg(ptr))
+       asm volatile(LOCK_PREFIX "cmpxchg8b %3"
+                    : "=A"(prev)
                     : "b"((unsigned long)new),
                       "c"((unsigned long)(new >> 32)),
+                      "m"(*__xg(ptr)),
                       "0"(old)
                     : "memory");
        return prev;
@@ -213,10 +241,11 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
                                                   unsigned long long new)
 {
        unsigned long long prev;
-       asm volatile("cmpxchg8b %1"
-                    : "=A"(prev), "+m"(*__xg(ptr))
+       asm volatile("cmpxchg8b %3"
+                    : "=A"(prev)
                     : "b"((unsigned long)new),
                       "c"((unsigned long)(new >> 32)),
+                      "m"(*__xg(ptr)),
                       "0"(old)
                     : "memory");
        return prev;
index e8cb051b8681e4be97c803076716f8c287ada567..52de72e0de8c882c3673fcff25c315f270db977f 100644 (file)
@@ -8,11 +8,13 @@
 
 #define __xg(x) ((volatile long *)(x))
 
-static inline void set_64bit(volatile u64 *ptr, u64 val)
+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
 {
        *ptr = val;
 }
 
+#define _set_64bit set_64bit
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -24,26 +26,26 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
        switch (size) {
        case 1:
                asm volatile("xchgb %b0,%1"
-                            : "=q" (x), "+m" (*__xg(ptr))
-                            : "0" (x)
+                            : "=q" (x)
+                            : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 2:
                asm volatile("xchgw %w0,%1"
-                            : "=r" (x), "+m" (*__xg(ptr))
-                            : "0" (x)
+                            : "=r" (x)
+                            : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 4:
                asm volatile("xchgl %k0,%1"
-                            : "=r" (x), "+m" (*__xg(ptr))
-                            : "0" (x)
+                            : "=r" (x)
+                            : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 8:
                asm volatile("xchgq %0,%1"
-                            : "=r" (x), "+m" (*__xg(ptr))
-                            : "0" (x)
+                            : "=r" (x)
+                            : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        }
@@ -64,27 +66,27 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        unsigned long prev;
        switch (size) {
        case 1:
-               asm volatile(LOCK_PREFIX "cmpxchgb %b2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "q"(new), "0"(old)
+               asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+                            : "=a"(prev)
+                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
-               asm volatile(LOCK_PREFIX "cmpxchgw %w2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
-               asm volatile(LOCK_PREFIX "cmpxchgl %k2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 8:
-               asm volatile(LOCK_PREFIX "cmpxchgq %2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
@@ -103,27 +105,21 @@ static inline unsigned long __sync_cmpxchg(volatile void *ptr,
        unsigned long prev;
        switch (size) {
        case 1:
-               asm volatile("lock; cmpxchgb %b2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "q"(new), "0"(old)
+               asm volatile("lock; cmpxchgb %b1,%2"
+                            : "=a"(prev)
+                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
-               asm volatile("lock; cmpxchgw %w2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile("lock; cmpxchgw %w1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
-               asm volatile("lock; cmpxchgl %k2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
-                            : "memory");
-               return prev;
-       case 8:
-               asm volatile("lock; cmpxchgq %2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile("lock; cmpxchgl %1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
@@ -137,27 +133,27 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
        unsigned long prev;
        switch (size) {
        case 1:
-               asm volatile("cmpxchgb %b2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "q"(new), "0"(old)
+               asm volatile("cmpxchgb %b1,%2"
+                            : "=a"(prev)
+                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
-               asm volatile("cmpxchgw %w2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile("cmpxchgw %w1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
-               asm volatile("cmpxchgl %k2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile("cmpxchgl %k1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 8:
-               asm volatile("cmpxchgq %2,%1"
-                            : "=a"(prev), "+m"(*__xg(ptr))
-                            : "r"(new), "0"(old)
+               asm volatile("cmpxchgq %1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
index c8c9a74d8ccc1e6f70ab807d0db9a6860a96117d..9a9c7bdc923dee66a411bdae3f30065e9e4ee09f 100644 (file)
@@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = task_pt_regs(current);
        return (void __user *)regs->sp - len;
index 1efb1fae606fb41f7808684ac047701b2b05ad9b..9cfc88b97742c45492d814b1d30ae3b60139da90 100644 (file)
 #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
 #define X86_FEATURE_OSVW       (6*32+ 9) /* OS Visible Workaround */
 #define X86_FEATURE_IBS                (6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP                (6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SSE5       (6*32+11) /* SSE-5 */
 #define X86_FEATURE_SKINIT     (6*32+12) /* SKINIT/STGI instructions */
 #define X86_FEATURE_WDT                (6*32+13) /* Watchdog timer */
-#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
index c22a1648113dc5b4a255677a125c90a2c8d9a927..14f9890eb495a31a203fa780b3caf16b018d68ef 100644 (file)
@@ -82,9 +82,6 @@ enum fixed_addresses {
 #endif
        FIX_DBGP_BASE,
        FIX_EARLYCON_MEM_BASE,
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
-       FIX_OHCI1394_BASE,
-#endif
 #ifdef CONFIG_X86_LOCAL_APIC
        FIX_APIC_BASE,  /* local (CPU) APIC) -- required for SMP or not */
 #endif
@@ -129,6 +126,9 @@ enum fixed_addresses {
        FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
                        (__end_of_permanent_fixed_addresses & 255),
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+       FIX_OHCI1394_BASE,
+#endif
 #ifdef CONFIG_X86_32
        FIX_WP_TEST,
 #endif
index 6a63b86c64a136872aa3c522fab6dfc93e5923ad..73739322b6d05675ca8155aac9f5126a5e701460 100644 (file)
@@ -172,7 +172,6 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 
 extern void iounmap(volatile void __iomem *addr);
 
-extern void set_iounmap_nonlazy(void);
 
 #ifdef CONFIG_X86_32
 # include "io_32.h"
index 5f61f6e0ffdd73039bff49159ff41034a6efdde4..7c7c16cde1f8f3b5ddcf65f6a2f40d537368ad86 100644 (file)
@@ -160,7 +160,6 @@ extern int io_apic_get_redir_entries(int ioapic);
 struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
                 struct io_apic_irq_attr *irq_attr);
-void setup_IO_APIC_irq_extra(u32 gsi);
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
 extern void ioapic_insert_resources(void);
index f0746f46990dd17f2cac51493086f53de40ea682..c2d1f3b58e5f1342607280be6a71d434796482dd 100644 (file)
@@ -13,16 +13,11 @@ extern void k8_flush_garts(void);
 extern int k8_scan_nodes(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_K8_NB
-extern int num_k8_northbridges;
-
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
        return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
 }
-
 #else
-#define num_k8_northbridges 0
-
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
        return NULL;
index 5ed59ec92534124348301c54403277f02c8459e2..7c18e1230f5490f1f7a58bd103302ffcb8bf20b1 100644 (file)
@@ -54,23 +54,13 @@ struct x86_emulate_ctxt;
 struct x86_emulate_ops {
        /*
         * read_std: Read bytes of standard (non-emulated/special) memory.
-        *           Used for descriptor reading.
+        *           Used for instruction fetch, stack operations, and others.
         *  @addr:  [IN ] Linear address from which to read.
         *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to read from memory.
         */
        int (*read_std)(unsigned long addr, void *val,
-                       unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
-
-       /*
-        * fetch: Read bytes of standard (non-emulated/special) memory.
-        *        Used for instruction fetch.
-        *  @addr:  [IN ] Linear address from which to read.
-        *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
-        *  @bytes: [IN ] Number of bytes to read from memory.
-        */
-       int (*fetch)(unsigned long addr, void *val,
-                       unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+                       unsigned int bytes, struct kvm_vcpu *vcpu);
 
        /*
         * read_emulated: Read bytes from emulated/special memory area.
@@ -178,7 +168,6 @@ struct x86_emulate_ctxt {
 
 /* Execution mode, passed to the emulator. */
 #define X86EMUL_MODE_REAL     0        /* Real mode.             */
-#define X86EMUL_MODE_VM86     1        /* Virtual 8086 mode.     */
 #define X86EMUL_MODE_PROT16   2        /* 16-bit protected mode. */
 #define X86EMUL_MODE_PROT32   4        /* 32-bit protected mode. */
 #define X86EMUL_MODE_PROT64   8        /* 64-bit (long) mode.    */
index 600807b3d3650420c1aac56b10b69cc9abd15deb..d759a1f55084168d2b5006e74b466e4eb24d7533 100644 (file)
@@ -193,7 +193,6 @@ union kvm_mmu_page_role {
                unsigned invalid:1;
                unsigned cr4_pge:1;
                unsigned nxe:1;
-               unsigned cr0_wp:1;
        };
 };
 
@@ -257,8 +256,7 @@ struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*free)(struct kvm_vcpu *vcpu);
-       gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-                           u32 *error);
+       gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
        void (*prefetch_page)(struct kvm_vcpu *vcpu,
                              struct kvm_mmu_page *page);
        int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -603,7 +601,8 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+                               int type_bits, int seg);
 
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
 
@@ -646,10 +645,6 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
@@ -663,7 +658,6 @@ void kvm_disable_tdp(void);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
-bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
 
@@ -674,6 +668,20 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
        return (struct kvm_mmu_page *)page_private(page);
 }
 
+static inline u16 kvm_read_fs(void)
+{
+       u16 seg;
+       asm("mov %%fs, %0" : "=g"(seg));
+       return seg;
+}
+
+static inline u16 kvm_read_gs(void)
+{
+       u16 seg;
+       asm("mov %%gs, %0" : "=g"(seg));
+       return seg;
+}
+
 static inline u16 kvm_read_ldt(void)
 {
        u16 ldt;
@@ -681,6 +689,16 @@ static inline u16 kvm_read_ldt(void)
        return ldt;
 }
 
+static inline void kvm_load_fs(u16 sel)
+{
+       asm("mov %0, %%fs" : : "rm"(sel));
+}
+
+static inline void kvm_load_gs(u16 sel)
+{
+       asm("mov %0, %%gs" : : "rm"(sel));
+}
+
 static inline void kvm_load_ldt(u16 sel)
 {
        asm("lldt %0" : : "rm"(sel));
index a7e502fdb16cf44ebea3cc7d7fb9ff8cedc38b38..4ffe09b2ad7511c3e50a1f4465923c2f522dd3d6 100644 (file)
 #define MSR_AMD64_PATCH_LEVEL          0x0000008b
 #define MSR_AMD64_NB_CFG               0xc001001f
 #define MSR_AMD64_PATCH_LOADER         0xc0010020
-#define MSR_AMD64_OSVW_ID_LENGTH       0xc0010140
-#define MSR_AMD64_OSVW_STATUS          0xc0010141
-#define MSR_AMD64_DC_CFG               0xc0011022
 #define MSR_AMD64_IBSFETCHCTL          0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD                0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD       0xc0011032
 #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
 #define FAM10H_MMIO_CONF_BASE_MASK     0xfffffff
 #define FAM10H_MMIO_CONF_BASE_SHIFT    20
-#define MSR_FAM10H_NODE_ID             0xc001100c
 
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1                        0xc001001a
 #define MSR_IA32_EBL_CR_POWERON                0x0000002a
 #define MSR_IA32_FEATURE_CONTROL        0x0000003a
 
-#define FEATURE_CONTROL_LOCKED                         (1<<0)
-#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX       (1<<1)
-#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX      (1<<2)
+#define FEATURE_CONTROL_LOCKED         (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED  (1<<2)
 
 #define MSR_IA32_APICBASE              0x0000001b
 #define MSR_IA32_APICBASE_BSP          (1<<8)
index 271de94c3810d9bbdb2c01bbfbb43f221b46ee24..0e8c2a0fd9222d4b75793fb664591de22461e7d7 100644 (file)
@@ -22,11 +22,6 @@ static inline void paravirt_release_pmd(unsigned long pfn) {}
 static inline void paravirt_release_pud(unsigned long pfn) {}
 #endif
 
-/*
- * Flags to use when allocating a user page table page.
- */
-extern gfp_t __userpte_alloc_gfp;
-
 /*
  * Allocate and free page tables.
  */
index 750f1bf1fab18747650710c8e3d64b1fc9049511..01fd9461d323b89827afdc9a5a4c205a6bae3407 100644 (file)
@@ -27,7 +27,6 @@ struct mm_struct;
 struct vm_area_struct;
 
 extern pgd_t swapper_pg_dir[1024];
-extern pgd_t trampoline_pg_dir[1024];
 
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
index 606ede126972e568b992268da3df6f64b7e98480..ca7517d3377634b6251ce727326d06dd30627fc5 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
-#include <asm/asm.h>
 
 struct rwsem_waiter;
 
@@ -56,28 +55,17 @@ extern asmregparm struct rw_semaphore *
 
 /*
  * the semaphore definition
- *
- * The bias values and the counter type limits the number of
- * potential readers/writers to 32767 for 32 bits and 2147483647
- * for 64 bits.
  */
 
-#ifdef CONFIG_X86_64
-# define RWSEM_ACTIVE_MASK             0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK             0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000L
-#define RWSEM_ACTIVE_BIAS              0x00000001L
-#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_UNLOCKED_VALUE           0x00000000
+#define RWSEM_ACTIVE_BIAS              0x00000001
+#define RWSEM_ACTIVE_MASK              0x0000ffff
+#define RWSEM_WAITING_BIAS             (-0x00010000)
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-typedef signed long rwsem_count_t;
-
 struct rw_semaphore {
-       rwsem_count_t           count;
+       signed long             count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -117,7 +105,7 @@ do {                                                                \
 static inline void __down_read(struct rw_semaphore *sem)
 {
        asm volatile("# beginning down_read\n\t"
-                    LOCK_PREFIX _ASM_INC "(%1)\n\t"
+                    LOCK_PREFIX "  incl      (%%eax)\n\t"
                     /* adds 0x00000001, returns the old value */
                     "  jns        1f\n"
                     "  call call_rwsem_down_read_failed\n"
@@ -133,14 +121,14 @@ static inline void __down_read(struct rw_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-       rwsem_count_t result, tmp;
+       __s32 result, tmp;
        asm volatile("# beginning __down_read_trylock\n\t"
-                    "  mov          %0,%1\n\t"
+                    "  movl      %0,%1\n\t"
                     "1:\n\t"
-                    "  mov          %1,%2\n\t"
-                    "  add          %3,%2\n\t"
+                    "  movl         %1,%2\n\t"
+                    "  addl      %3,%2\n\t"
                     "  jle          2f\n\t"
-                    LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
+                    LOCK_PREFIX "  cmpxchgl  %2,%0\n\t"
                     "  jnz          1b\n\t"
                     "2:\n\t"
                     "# ending __down_read_trylock\n\t"
@@ -155,13 +143,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-       rwsem_count_t tmp;
+       int tmp;
 
        tmp = RWSEM_ACTIVE_WRITE_BIAS;
        asm volatile("# beginning down_write\n\t"
-                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+                    LOCK_PREFIX "  xadd      %%edx,(%%eax)\n\t"
                     /* subtract 0x0000ffff, returns the old value */
-                    "  test      %1,%1\n\t"
+                    "  testl     %%edx,%%edx\n\t"
                     /* was the count 0 before? */
                     "  jz        1f\n"
                     "  call call_rwsem_down_write_failed\n"
@@ -182,9 +170,9 @@ static inline void __down_write(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       rwsem_count_t ret = cmpxchg(&sem->count,
-                                   RWSEM_UNLOCKED_VALUE,
-                                   RWSEM_ACTIVE_WRITE_BIAS);
+       signed long ret = cmpxchg(&sem->count,
+                                 RWSEM_UNLOCKED_VALUE,
+                                 RWSEM_ACTIVE_WRITE_BIAS);
        if (ret == RWSEM_UNLOCKED_VALUE)
                return 1;
        return 0;
@@ -195,9 +183,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-       rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
+       __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
        asm volatile("# beginning __up_read\n\t"
-                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+                    LOCK_PREFIX "  xadd      %%edx,(%%eax)\n\t"
                     /* subtracts 1, returns the old value */
                     "  jns        1f\n\t"
                     "  call call_rwsem_wake\n"
@@ -213,18 +201,18 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-       rwsem_count_t tmp;
        asm volatile("# beginning __up_write\n\t"
-                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+                    "  movl      %2,%%edx\n\t"
+                    LOCK_PREFIX "  xaddl     %%edx,(%%eax)\n\t"
                     /* tries to transition
                        0xffff0001 -> 0x00000000 */
                     "  jz       1f\n"
                     "  call call_rwsem_wake\n"
                     "1:\n\t"
                     "# ending __up_write\n"
-                    : "+m" (sem->count), "=d" (tmp)
-                    : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
-                    : "memory", "cc");
+                    : "+m" (sem->count)
+                    : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
+                    : "memory", "cc", "edx");
 }
 
 /*
@@ -233,38 +221,33 @@ static inline void __up_write(struct rw_semaphore *sem)
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
        asm volatile("# beginning __downgrade_write\n\t"
-                    LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
-                    /*
-                     * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
-                     *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
-                     */
+                    LOCK_PREFIX "  addl      %2,(%%eax)\n\t"
+                    /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
                     "  jns       1f\n\t"
                     "  call call_rwsem_downgrade_wake\n"
                     "1:\n\t"
                     "# ending __downgrade_write\n"
                     : "+m" (sem->count)
-                    : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
+                    : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
                     : "memory", "cc");
 }
 
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(rwsem_count_t delta,
-                                   struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 {
-       asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
+       asm volatile(LOCK_PREFIX "addl %1,%0"
                     : "+m" (sem->count)
-                    : "er" (delta));
+                    : "ir" (delta));
 }
 
 /*
  * implement exchange and add functionality
  */
-static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
-                                               struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-       rwsem_count_t tmp = delta;
+       int tmp = delta;
 
        asm volatile(LOCK_PREFIX "xadd %0,%1"
                     : "+r" (tmp), "+m" (sem->count)
index 4c2f63c7fc1b2ec071d3d32465f389a6d9df24dd..1e796782cd7b9e606e59f2f5c39b4819fb9131c7 100644 (file)
@@ -50,7 +50,7 @@ struct smp_ops {
        void (*smp_prepare_cpus)(unsigned max_cpus);
        void (*smp_cpus_done)(unsigned max_cpus);
 
-       void (*stop_other_cpus)(int wait);
+       void (*smp_send_stop)(void);
        void (*smp_send_reschedule)(int cpu);
 
        int (*cpu_up)(unsigned cpu);
@@ -73,12 +73,7 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-       smp_ops.stop_other_cpus(0);
-}
-
-static inline void stop_other_cpus(void)
-{
-       smp_ops.stop_other_cpus(1);
+       smp_ops.smp_send_stop();
 }
 
 static inline void smp_prepare_boot_cpu(void)
@@ -140,8 +135,6 @@ int native_cpu_disable(void);
 void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
-void wbinvd_on_cpu(int cpu);
-int wbinvd_on_all_cpus(void);
 
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
@@ -154,13 +147,6 @@ static inline int num_booting_cpus(void)
 {
        return cpumask_weight(cpu_callout_mask);
 }
-#else /* !CONFIG_SMP */
-#define wbinvd_on_cpu(cpu)     wbinvd()
-static inline int wbinvd_on_all_cpus(void)
-{
-       wbinvd();
-       return 0;
-}
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus __cpuinitdata;
index fd921c3a68414e341fe8fffa8b1df1326eba2ba7..48dcfa62ea07eb5985ef61c526ed767da3795283 100644 (file)
@@ -15,8 +15,6 @@ static inline int arch_prepare_suspend(void) { return 0; }
 struct saved_context {
        u16 es, fs, gs, ss;
        unsigned long cr0, cr2, cr3, cr4;
-       u64 misc_enable;
-       bool misc_enable_saved;
        struct desc_ptr gdt;
        struct desc_ptr idt;
        u16 ldt;
index 8d942afae681bec8fe2c47a9bf57955dbc4317b0..06284f42b7599a8f46669fc3e4f6c9d8bbea7eb7 100644 (file)
@@ -27,8 +27,6 @@ struct saved_context {
        u16 ds, es, fs, gs, ss;
        unsigned long gs_base, gs_kernel_base, fs_base;
        unsigned long cr0, cr2, cr3, cr4, cr8;
-       u64 misc_enable;
-       bool misc_enable_saved;
        unsigned long efer;
        u16 gdt_pad;
        u16 gdt_limit;
index e0fbf294536c6e96cb7b0169cd263cfe1bd8133d..f08f973748922b26a2417ed70db47fec2b54b009 100644 (file)
@@ -449,7 +449,7 @@ void stop_this_cpu(void *dummy);
  *
  * (Could use an alternative three way for this if there was one.)
  */
-static __always_inline void rdtsc_barrier(void)
+static inline void rdtsc_barrier(void)
 {
        alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
        alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
index ebace684c476effa7488ca2f678cb5e58fa53d6e..90f06c25221d792dab33f662c25012b7d0c39f59 100644 (file)
@@ -13,18 +13,15 @@ extern unsigned char *trampoline_base;
 
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
-extern unsigned long initial_page_table;
 extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 #define TRAMPOLINE_BASE 0x6000
 
 extern unsigned long setup_trampoline(void);
-extern void __init setup_trampoline_page_table(void);
 extern void __init reserve_trampoline_memory(void);
 #else
-static inline void setup_trampoline_page_table(void) {}
-static inline void reserve_trampoline_memory(void) {}
+static inline void reserve_trampoline_memory(void) {};
 #endif /* CONFIG_X86_TRAMPOLINE */
 
 #endif /* __ASSEMBLY__ */
index 1ca132fc0d039cbc8c3b7f605fa4dbbd91db7291..c0427295e8f58956e32f833c78c9ad75676778d2 100644 (file)
@@ -59,7 +59,5 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
-extern void save_sched_clock_state(void);
-extern void restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
index d1911abac18056def432f5549d8ff882c03e6287..d8e5d0cdd678d3b4396c0e7f859b7c3f6ac0d212 100644 (file)
@@ -11,8 +11,6 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
-CFLAGS_REMOVE_pvclock.o = -pg
-CFLAGS_REMOVE_kvmclock.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 endif
index 23c2da87df19c382c476fe798b5cdb073065b96c..67e929b89875bea8c06c5b8c454a43332862eb46 100644 (file)
@@ -446,12 +446,6 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 {
        *irq = gsi;
-
-#ifdef CONFIG_X86_IO_APIC
-       if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
-               setup_IO_APIC_irq_extra(gsi);
-#endif
-
        return 0;
 }
 
@@ -479,8 +473,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
                plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
        }
 #endif
-       irq = plat_gsi;
-
+       acpi_gsi_to_irq(plat_gsi, &irq);
        return irq;
 }
 
@@ -1191,6 +1184,9 @@ static void __init acpi_process_madt(void)
                if (!error) {
                        acpi_lapic = 1;
 
+#ifdef CONFIG_X86_BIGSMP
+                       generic_bigsmp_probe();
+#endif
                        /*
                         * Parse MADT IO-APIC entries
                         */
@@ -1200,6 +1196,8 @@ static void __init acpi_process_madt(void)
                                acpi_ioapic = 1;
 
                                smp_found_config = 1;
+                               if (apic->setup_apic_routing)
+                                       apic->setup_apic_routing();
                        }
                }
                if (error == -EINVAL) {
@@ -1348,6 +1346,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
                     },
         },
+       {
+        .callback = force_acpi_ht,
+        .ident = "ASUS P2B-DS",
+        .matches = {
+                    DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                    DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
+                    },
+        },
        {
         .callback = force_acpi_ht,
         .ident = "ASUS CUR-DLS",
index fb7a5f052e2b8766d11115e3f7fc174fadf6ac2f..2e837f5080fe56d3ad0c87d308564cf157147624 100644 (file)
@@ -145,15 +145,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
                percpu_entry->states[cx->index].eax = cx->address;
                percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
        }
-
-       /*
-        * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
-        * then we should skip checking BM_STS for this C-state.
-        * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
-        */
-       if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
-               cx->bm_sts_skip = 1;
-
        return retval;
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
index 7cd33f75a69c4c44360528bc44b00418cb1f29de..23fc9fe1625f58494f2ff16780df93074b86916c 100644 (file)
@@ -544,7 +544,7 @@ static void flush_devices_by_domain(struct protection_domain *domain)
 
        for (i = 0; i <= amd_iommu_last_bdf; ++i) {
                if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
-                   (domain != NULL && amd_iommu_pd_table[i] != domain))
+                   (amd_iommu_pd_table[i] != domain))
                        continue;
 
                iommu = amd_iommu_rlookup_table[i];
@@ -1688,7 +1688,6 @@ static void __unmap_single(struct amd_iommu *iommu,
                           size_t size,
                           int dir)
 {
-       dma_addr_t flush_addr;
        dma_addr_t i, start;
        unsigned int pages;
 
@@ -1696,7 +1695,6 @@ static void __unmap_single(struct amd_iommu *iommu,
            (dma_addr + size > dma_dom->aperture_size))
                return;
 
-       flush_addr = dma_addr;
        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr &= PAGE_MASK;
        start = dma_addr;
@@ -1711,7 +1709,7 @@ static void __unmap_single(struct amd_iommu *iommu,
        dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-               iommu_flush_pages(iommu, dma_dom->domain.id, flush_addr, size);
+               iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
                dma_dom->need_flush = false;
        }
 }
@@ -2241,7 +2239,9 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
        free_pagetable(domain);
 
-       protection_domain_free(domain);
+       domain_id_free(domain->id);
+
+       kfree(domain);
 
        dom->priv = NULL;
 }
index 400be996de7b0e5d49edadc9cd4ff4336d620cef..362ab88c73ac0bedd5f4ce4765ebe596d8c0924b 100644 (file)
@@ -622,13 +622,6 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);
-
-       if (is_rd890_iommu(iommu->dev)) {
-               pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
-               pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
-               pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
-               pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
-       }
 }
 
 /*
@@ -646,9 +639,29 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
        struct ivhd_entry *e;
 
        /*
-        * First save the recommended feature enable bits from ACPI
+        * First set the recommended feature enable bits from ACPI
+        * into the IOMMU control registers
+        */
+       h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+       h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+       h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+       h->flags & IVHD_FLAG_ISOC_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+       /*
+        * make IOMMU memory accesses cache coherent
         */
-       iommu->acpi_flags = h->flags;
+       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
 
        /*
         * Done. Now parse the device entries
@@ -1076,40 +1089,6 @@ static void init_device_table(void)
        }
 }
 
-static void iommu_init_flags(struct amd_iommu *iommu)
-{
-       iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
-               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
-       iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
-       iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
-       iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
-               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
-       /*
-        * make IOMMU memory accesses cache coherent
-        */
-       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
-}
-
-static void iommu_apply_quirks(struct amd_iommu *iommu)
-{
-       if (is_rd890_iommu(iommu->dev)) {
-               pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
-               pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
-               pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
-               pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
-       }
-}
-
 /*
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
@@ -1120,8 +1099,6 @@ static void enable_iommus(void)
 
        for_each_iommu(iommu) {
                iommu_disable(iommu);
-               iommu_apply_quirks(iommu);
-               iommu_init_flags(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
@@ -1307,8 +1284,6 @@ int __init amd_iommu_init(void)
        if (ret)
                goto free;
 
-       enable_iommus();
-
        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
@@ -1319,6 +1294,8 @@ int __init amd_iommu_init(void)
 
        amd_iommu_init_api();
 
+       enable_iommus();
+
        if (iommu_pass_through)
                goto out;
 
@@ -1337,8 +1314,6 @@ out:
        return ret;
 
 free:
-       disable_iommus();
-
        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));
 
index 082089ec5594f365d3a41f272d7d86d1735c0b72..128111d8ffe0de7ffcdaf0891221ff37d0ae61f8 100644 (file)
@@ -389,7 +389,6 @@ void __init gart_iommu_hole_init(void)
        for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
                int bus;
                int dev_base, dev_limit;
-               u32 ctl;
 
                bus = bus_dev_ranges[i].bus;
                dev_base = bus_dev_ranges[i].dev_base;
@@ -402,19 +401,7 @@ void __init gart_iommu_hole_init(void)
                        iommu_detected = 1;
                        gart_iommu_aperture = 1;
 
-                       ctl = read_pci_config(bus, slot, 3,
-                                             AMD64_GARTAPERTURECTL);
-
-                       /*
-                        * Before we do anything else disable the GART. It may
-                        * still be enabled if we boot into a crash-kernel here.
-                        * Reconfiguring the GART while it is enabled could have
-                        * unknown side-effects.
-                        */
-                       ctl &= ~GARTEN;
-                       write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
-
-                       aper_order = (ctl >> 1) & 7;
+                       aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
                        aper_size = (32 * 1024 * 1024) << aper_order;
                        aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
                        aper_base <<= 25;
index 6702ab74c58c7dfeea0c6743431e6b9259fe4927..c86dbcf39e8d65142974b10a56ff752cc782ef5f 100644 (file)
@@ -51,7 +51,6 @@
 #include <asm/smp.h>
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
-#include <asm/tsc.h>
 
 unsigned int num_processors;
 
@@ -942,7 +941,7 @@ void disable_local_APIC(void)
        unsigned int value;
 
        /* APIC hasn't been mapped yet */
-       if (!x2apic_mode && !apic_phys)
+       if (!apic_phys)
                return;
 
        clear_local_APIC();
@@ -1173,13 +1172,8 @@ static void __cpuinit lapic_setup_esr(void)
  */
 void __cpuinit setup_local_APIC(void)
 {
-       unsigned int value, queued;
-       int i, j, acked = 0;
-       unsigned long long tsc = 0, ntsc;
-       long long max_loops = cpu_khz;
-
-       if (cpu_has_tsc)
-               rdtscll(tsc);
+       unsigned int value;
+       int i, j;
 
        if (disable_apic) {
                arch_disable_smp_support();
@@ -1231,32 +1225,13 @@ void __cpuinit setup_local_APIC(void)
         * the interrupt. Hence a vector might get locked. It was noticed
         * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
         */
-       do {
-               queued = 0;
-               for (i = APIC_ISR_NR - 1; i >= 0; i--)
-                       queued |= apic_read(APIC_IRR + i*0x10);
-
-               for (i = APIC_ISR_NR - 1; i >= 0; i--) {
-                       value = apic_read(APIC_ISR + i*0x10);
-                       for (j = 31; j >= 0; j--) {
-                               if (value & (1<<j)) {
-                                       ack_APIC_irq();
-                                       acked++;
-                               }
-                       }
+       for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+               value = apic_read(APIC_ISR + i*0x10);
+               for (j = 31; j >= 0; j--) {
+                       if (value & (1<<j))
+                               ack_APIC_irq();
                }
-               if (acked > 256) {
-                       printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
-                              acked);
-                       break;
-               }
-               if (cpu_has_tsc) {
-                       rdtscll(ntsc);
-                       max_loops = (cpu_khz << 10) - (ntsc - tsc);
-               } else
-                       max_loops--;
-       } while (queued && max_loops > 0);
-       WARN_ON(max_loops <= 0);
+       }
 
        /*
         * Now that we are all set up, enable the APIC
@@ -1689,8 +1664,8 @@ int __init APIC_init_uniprocessor(void)
        }
 #endif
 
-#ifndef CONFIG_SMP
        enable_IR_x2apic();
+#ifdef CONFIG_X86_64
        default_setup_apic_routing();
 #endif
 
@@ -1940,6 +1915,18 @@ void __cpuinit generic_processor_info(int apicid, int version)
        if (apicid > max_physical_apicid)
                max_physical_apicid = apicid;
 
+#ifdef CONFIG_X86_32
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_INTEL:
+               if (num_processors > 8)
+                       def_to_bigsmp = 1;
+               break;
+       case X86_VENDOR_AMD:
+               if (max_physical_apicid >= 8)
+                       def_to_bigsmp = 1;
+       }
+#endif
+
 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
        early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
        early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
index d850eeb19243c7e582a289ddc6c8bf4d878092b6..c107e837ed7d6a8ff5b66a749fd0beb1e241772f 100644 (file)
@@ -332,19 +332,14 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,
 
        old_cfg = old_desc->chip_data;
 
-       cfg->vector = old_cfg->vector;
-       cfg->move_in_progress = old_cfg->move_in_progress;
-       cpumask_copy(cfg->domain, old_cfg->domain);
-       cpumask_copy(cfg->old_domain, old_cfg->old_domain);
+       memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
 
        init_copy_irq_2_pin(old_cfg, cfg, node);
 }
 
-static void free_irq_cfg(struct irq_cfg *cfg)
+static void free_irq_cfg(struct irq_cfg *old_cfg)
 {
-       free_cpumask_var(cfg->domain);
-       free_cpumask_var(cfg->old_domain);
-       kfree(cfg);
+       kfree(old_cfg);
 }
 
 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -1410,7 +1405,6 @@ int setup_ioapic_entry(int apic_id, int irq,
                irte.dlvry_mode = apic->irq_delivery_mode;
                irte.vector = vector;
                irte.dest_id = IRTE_DEST(destination);
-               irte.redir_hint = 1;
 
                /* Set source-id of interrupt request */
                set_ioapic_sid(&irte, apic_id);
@@ -1490,7 +1484,7 @@ static struct {
 
 static void __init setup_IO_APIC_irqs(void)
 {
-       int apic_id, pin, idx, irq;
+       int apic_id = 0, pin, idx, irq;
        int notcon = 0;
        struct irq_desc *desc;
        struct irq_cfg *cfg;
@@ -1498,7 +1492,14 @@ static void __init setup_IO_APIC_irqs(void)
 
        apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
-       for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
+#ifdef CONFIG_ACPI
+       if (!acpi_disabled && acpi_ioapic) {
+               apic_id = mp_find_ioapic(0);
+               if (apic_id < 0)
+                       apic_id = 0;
+       }
+#endif
+
        for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
                idx = find_irq_entry(apic_id, pin, mp_INT);
                if (idx == -1) {
@@ -1520,9 +1521,6 @@ static void __init setup_IO_APIC_irqs(void)
 
                irq = pin_2_irq(idx, apic_id, pin);
 
-               if ((apic_id > 0) && (irq > 16))
-                       continue;
-
                /*
                 * Skip the timer IRQ if there's a quirk handler
                 * installed and if it returns 1:
@@ -1551,56 +1549,6 @@ static void __init setup_IO_APIC_irqs(void)
                        " (apicid-pin) not connected\n");
 }
 
-/*
- * for the gsit that is not in first ioapic
- * but could not use acpi_register_gsi()
- * like some special sci in IBM x3330
- */
-void setup_IO_APIC_irq_extra(u32 gsi)
-{
-       int apic_id = 0, pin, idx, irq;
-       int node = cpu_to_node(boot_cpu_id);
-       struct irq_desc *desc;
-       struct irq_cfg *cfg;
-
-       /*
-        * Convert 'gsi' to 'ioapic.pin'.
-        */
-       apic_id = mp_find_ioapic(gsi);
-       if (apic_id < 0)
-               return;
-
-       pin = mp_find_ioapic_pin(apic_id, gsi);
-       idx = find_irq_entry(apic_id, pin, mp_INT);
-       if (idx == -1)
-               return;
-
-       irq = pin_2_irq(idx, apic_id, pin);
-#ifdef CONFIG_SPARSE_IRQ
-       desc = irq_to_desc(irq);
-       if (desc)
-               return;
-#endif
-       desc = irq_to_desc_alloc_node(irq, node);
-       if (!desc) {
-               printk(KERN_INFO "can not get irq_desc for %d\n", irq);
-               return;
-       }
-
-       cfg = desc->chip_data;
-       add_pin_to_irq_node(cfg, node, apic_id, pin);
-
-       if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
-               pr_debug("Pin %d-%d already programmed\n",
-                        mp_ioapics[apic_id].apicid, pin);
-               return;
-       }
-       set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
-
-       setup_IO_APIC_irq(apic_id, pin, irq, desc,
-                       irq_trigger(idx), irq_polarity(idx));
-}
-
 /*
  * Set up the timer pin, possibly with the 8259A-master behind.
  */
@@ -1742,8 +1690,6 @@ __apicdebuginit(void) print_IO_APIC(void)
                struct irq_pin_list *entry;
 
                cfg = desc->chip_data;
-               if (!cfg)
-                       continue;
                entry = cfg->irq_2_pin;
                if (!entry)
                        continue;
@@ -3219,9 +3165,12 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
        }
        spin_unlock_irqrestore(&vector_lock, flags);
 
-       if (irq > 0)
-               dynamic_irq_init_keep_chip_data(irq);
-
+       if (irq > 0) {
+               dynamic_irq_init(irq);
+               /* restore it, in case dynamic_irq_init clear it */
+               if (desc_new)
+                       desc_new->chip_data = cfg_new;
+       }
        return irq;
 }
 
@@ -3244,12 +3193,17 @@ void destroy_irq(unsigned int irq)
 {
        unsigned long flags;
        struct irq_cfg *cfg;
+       struct irq_desc *desc;
 
-       dynamic_irq_cleanup_keep_chip_data(irq);
+       /* store it, in case dynamic_irq_cleanup clear it */
+       desc = irq_to_desc(irq);
+       cfg = desc->chip_data;
+       dynamic_irq_cleanup(irq);
+       /* connect back irq_cfg */
+       desc->chip_data = cfg;
 
        free_irte(irq);
        spin_lock_irqsave(&vector_lock, flags);
-       cfg = irq_to_desc(irq)->chip_data;
        __clear_irq_vector(irq, cfg);
        spin_unlock_irqrestore(&vector_lock, flags);
 }
@@ -3290,7 +3244,6 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
                irte.dlvry_mode = apic->irq_delivery_mode;
                irte.vector = cfg->vector;
                irte.dest_id = IRTE_DEST(dest);
-               irte.redir_hint = 1;
 
                /* Set source-id of interrupt request */
                set_msi_sid(&irte, pdev);
@@ -3345,7 +3298,7 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 
        cfg = desc->chip_data;
 
-       get_cached_msi_msg_desc(desc, &msg);
+       read_msi_msg_desc(desc, &msg);
 
        msg.data &= ~MSI_DATA_VECTOR_MASK;
        msg.data |= MSI_DATA_VECTOR(cfg->vector);
@@ -4088,23 +4041,27 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 #ifdef CONFIG_SMP
 void __init setup_ioapic_dest(void)
 {
-       int pin, ioapic, irq, irq_entry;
+       int pin, ioapic = 0, irq, irq_entry;
        struct irq_desc *desc;
        const struct cpumask *mask;
 
        if (skip_ioapic_setup == 1)
                return;
 
-       for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
+#ifdef CONFIG_ACPI
+       if (!acpi_disabled && acpi_ioapic) {
+               ioapic = mp_find_ioapic(0);
+               if (ioapic < 0)
+                       ioapic = 0;
+       }
+#endif
+
        for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
                irq_entry = find_irq_entry(ioapic, pin, mp_INT);
                if (irq_entry == -1)
                        continue;
                irq = pin_2_irq(irq_entry, ioapic, pin);
 
-               if ((ioapic > 0) && (irq > 16))
-                       continue;
-
                desc = irq_to_desc(irq);
 
                /*
index 88b9d22c84328746210fae7c27fde54f2a699160..0c0182cc947d0ac95faf54b76e83b1472c67af15 100644 (file)
@@ -53,31 +53,6 @@ static int __init print_ipi_mode(void)
 late_initcall(print_ipi_mode);
 
 void default_setup_apic_routing(void)
-{
-       int version = apic_version[boot_cpu_physical_apicid];
-
-       if (num_possible_cpus() > 8) {
-               switch (boot_cpu_data.x86_vendor) {
-               case X86_VENDOR_INTEL:
-                       if (!APIC_XAPIC(version)) {
-                               def_to_bigsmp = 0;
-                               break;
-                       }
-                       /* If P4 and above fall through */
-               case X86_VENDOR_AMD:
-                       def_to_bigsmp = 1;
-               }
-       }
-
-#ifdef CONFIG_X86_BIGSMP
-       generic_bigsmp_probe();
-#endif
-
-       if (apic->setup_apic_routing)
-               apic->setup_apic_routing();
-}
-
-void setup_apic_flat_routing(void)
 {
 #ifdef CONFIG_X86_IO_APIC
        printk(KERN_INFO
@@ -128,7 +103,7 @@ struct apic apic_default = {
        .init_apic_ldr                  = default_init_apic_ldr,
 
        .ioapic_phys_id_map             = default_ioapic_phys_id_map,
-       .setup_apic_routing             = setup_apic_flat_routing,
+       .setup_apic_routing             = default_setup_apic_routing,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = default_apicid_to_node,
        .cpu_to_logical_apicid          = default_cpu_to_logical_apicid,
index 4c56f544f1676a06ce9bca808d1c6fae943840d2..c4cbd3080c1c36e0c798d10a4652427fa4e09603 100644 (file)
@@ -67,8 +67,17 @@ void __init default_setup_apic_routing(void)
        }
 #endif
 
-       if (apic == &apic_flat && num_possible_cpus() > 8)
-               apic = &apic_physflat;
+       if (apic == &apic_flat) {
+               switch (boot_cpu_data.x86_vendor) {
+               case X86_VENDOR_INTEL:
+                       if (num_processors > 8)
+                               apic = &apic_physflat;
+                       break;
+               case X86_VENDOR_AMD:
+                       if (max_physical_apicid >= 8)
+                               apic = &apic_physflat;
+               }
+       }
 
        printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 
index c7ee9c9e02454fa5ab9bbfc025b8948a187d8847..9ee87cfe0859fa042550a32ece8eed5ce2349324 100644 (file)
@@ -595,11 +595,9 @@ void __init uv_system_init(void)
                for (j = 0; j < 64; j++) {
                        if (!test_bit(j, &present))
                                continue;
-                       pnode = (i * 64 + j);
-                       uv_blade_info[blade].pnode = pnode;
+                       uv_blade_info[blade].pnode = (i * 64 + j);
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
-                       max_pnode = max(pnode, max_pnode);
                        blade++;
                }
        }
@@ -637,6 +635,10 @@ void __init uv_system_init(void)
                uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
+               max_pnode = max(pnode, max_pnode);
+
+               printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
+                       cpu, apicid, pnode, nid, lcpu, blade);
        }
 
        /* Add blade/pnode info for nodes without cpus */
@@ -648,6 +650,7 @@ void __init uv_system_init(void)
                pnode = (paddr >> m_val) & pnode_mask;
                blade = boot_pnode_to_blade(pnode);
                uv_node_to_blade[nid] = blade;
+               max_pnode = max(pnode, max_pnode);
        }
 
        map_gru_high(max_pnode);
index 3940fee7ea9fb3fa4f90eaee674c6ce32fa2ba16..c910a716a71ce103b878154b24b2813917624716 100644 (file)
@@ -254,36 +254,59 @@ static int __cpuinit nearby_node(int apicid)
 
 /*
  * Fixup core topology information for AMD multi-node processors.
- * Assumption: Number of cores in each internal node is the same.
+ * Assumption 1: Number of cores in each internal node is the same.
+ * Assumption 2: Mixed systems with both single-node and dual-node
+ *               processors are not supported.
  */
 #ifdef CONFIG_X86_HT
 static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
 {
-       unsigned long long value;
-       u32 nodes, cores_per_node;
+#ifdef CONFIG_PCI
+       u32 t, cpn;
+       u8 n, n_id;
        int cpu = smp_processor_id();
 
-       if (!cpu_has(c, X86_FEATURE_NODEID_MSR))
-               return;
-
        /* fixup topology information only once for a core */
        if (cpu_has(c, X86_FEATURE_AMD_DCM))
                return;
 
-       rdmsrl(MSR_FAM10H_NODE_ID, value);
-
-       nodes = ((value >> 3) & 7) + 1;
-       if (nodes == 1)
+       /* check for multi-node processor on boot cpu */
+       t = read_pci_config(0, 24, 3, 0xe8);
+       if (!(t & (1 << 29)))
                return;
 
        set_cpu_cap(c, X86_FEATURE_AMD_DCM);
-       cores_per_node = c->x86_max_cores / nodes;
 
-       /* store NodeID, use llc_shared_map to store sibling info */
-       per_cpu(cpu_llc_id, cpu) = value & 7;
+       /* cores per node: each internal node has half the number of cores */
+       cpn = c->x86_max_cores >> 1;
 
-       /* fixup core id to be in range from 0 to (cores_per_node - 1) */
-       c->cpu_core_id = c->cpu_core_id % cores_per_node;
+       /* even-numbered NB_id of this dual-node processor */
+       n = c->phys_proc_id << 1;
+
+       /*
+        * determine internal node id and assign cores fifty-fifty to
+        * each node of the dual-node processor
+        */
+       t = read_pci_config(0, 24 + n, 3, 0xe8);
+       n = (t>>30) & 0x3;
+       if (n == 0) {
+               if (c->cpu_core_id < cpn)
+                       n_id = 0;
+               else
+                       n_id = 1;
+       } else {
+               if (c->cpu_core_id < cpn)
+                       n_id = 1;
+               else
+                       n_id = 0;
+       }
+
+       /* compute entire NodeID, use llc_shared_map to store sibling info */
+       per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;
+
+       /* fixup core id to be in range from 0 to cpn */
+       c->cpu_core_id = c->cpu_core_id % cpn;
+#endif
 }
 #endif
 
index 4e34d10b841f35a46e536acc962b177c29747ce4..cc25c2b4a567c2ca3e020127cefe87b2778f02ee 100644 (file)
@@ -540,7 +540,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
        }
 }
 
-void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
        u32 tfms, xlvl;
        u32 ebx;
@@ -579,7 +579,6 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);
 
-       init_scattered_cpuid_features(c);
 }
 
 static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -728,6 +727,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 
        get_model_name(c); /* Default name */
 
+       init_scattered_cpuid_features(c);
        detect_nopl(c);
 }
 
index eb19c0800044b6b2b09e072e4679537f7823715f..6de9a908e4008bf6d99e56cb8a9ef909b1f3949c 100644 (file)
@@ -33,6 +33,5 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
                            *const __x86_cpu_dev_end[];
 
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
-extern void get_cpu_cap(struct cpuinfo_x86 *c);
 
 #endif
index acb0115f43dc0ed6b228c41d18b06082c8c5ac81..8b581d3905cb47214af854582e24a379fac444f3 100644 (file)
@@ -741,7 +741,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
                per_cpu(drv_data, policy->cpu) = NULL;
                acpi_processor_unregister_performance(data->acpi_data,
                                                      policy->cpu);
-               kfree(data->freq_table);
                kfree(data);
        }
 
index 5e92606c4cf8ecc58a8396549b5f6c3198bdd69d..ab1cd30340f52d87806a0e3c0cecf8bccbf92bae 100644 (file)
@@ -929,8 +929,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
                powernow_table[i].index = index;
 
                /* Frequency may be rounded for these */
-               if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
-                                || boot_cpu_data.x86 == 0x11) {
+               if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
                        powernow_table[i].frequency =
                                freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
                } else
index 6a77ccaed1aa8198c7b7e7bb56d31ecd0922120b..a2a03cf4a489966e793c9b14058f462d60a7e2eb 100644 (file)
@@ -40,7 +40,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                        c->cpuid_level = cpuid_eax(0);
-                       get_cpu_cap(c);
                }
        }
 
@@ -48,27 +47,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
-       /*
-        * Atom erratum AAE44/AAF40/AAG38/AAH41:
-        *
-        * A race condition between speculative fetches and invalidating
-        * a large page.  This is worked around in microcode, but we
-        * need the microcode to have already been loaded... so if it is
-        * not, recommend a BIOS update and disable large pages.
-        */
-       if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
-               u32 ucode, junk;
-
-               wrmsr(MSR_IA32_UCODE_REV, 0, 0);
-               sync_core();
-               rdmsr(MSR_IA32_UCODE_REV, junk, ucode);
-
-               if (ucode < 0x20e) {
-                       printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
-                       clear_cpu_cap(c, X86_FEATURE_PSE);
-               }
-       }
-
 #ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #else
@@ -92,8 +70,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-               if (!check_tsc_unstable())
-                       sched_clock_stable = 1;
+               sched_clock_stable = 1;
        }
 
        /*
index 417990f04b5d2e66cd5d06efd35b8fc5d4a3ffd1..8178d03529354951e9fc74c6214e1b7c1f619053 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/processor.h>
 #include <linux/smp.h>
 #include <asm/k8.h>
-#include <asm/smp.h>
 
 #define LVL_1_INST     1
 #define LVL_1_DATA     2
@@ -151,8 +150,7 @@ struct _cpuid4_info {
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
-       bool can_disable;
-       unsigned int l3_indices;
+       unsigned long can_disable;
        DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
 };
 
@@ -162,8 +160,7 @@ struct _cpuid4_info_regs {
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
-       bool can_disable;
-       unsigned int l3_indices;
+       unsigned long can_disable;
 };
 
 unsigned short                 num_cache_leaves;
@@ -293,36 +290,6 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
                (ebx->split.ways_of_associativity + 1) - 1;
 }
 
-struct _cache_attr {
-       struct attribute attr;
-       ssize_t (*show)(struct _cpuid4_info *, char *);
-       ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
-};
-
-#ifdef CONFIG_CPU_SUP_AMD
-static unsigned int __cpuinit amd_calc_l3_indices(void)
-{
-       /*
-        * We're called over smp_call_function_single() and therefore
-        * are on the correct cpu.
-        */
-       int cpu = smp_processor_id();
-       int node = cpu_to_node(cpu);
-       struct pci_dev *dev = node_to_k8_nb_misc(node);
-       unsigned int sc0, sc1, sc2, sc3;
-       u32 val = 0;
-
-       pci_read_config_dword(dev, 0x1C4, &val);
-
-       /* calculate subcache sizes */
-       sc0 = !(val & BIT(0));
-       sc1 = !(val & BIT(4));
-       sc2 = !(val & BIT(8))  + !(val & BIT(9));
-       sc3 = !(val & BIT(12)) + !(val & BIT(13));
-
-       return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
-}
-
 static void __cpuinit
 amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
@@ -332,108 +299,13 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
        if (boot_cpu_data.x86 == 0x11)
                return;
 
-       /* see errata #382 and #388 */
-       if ((boot_cpu_data.x86 == 0x10) &&
-           ((boot_cpu_data.x86_model < 0x8) ||
-            (boot_cpu_data.x86_mask  < 0x1)))
+       /* see erratum #382 */
+       if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
                return;
 
-       /* not in virtualized environments */
-       if (num_k8_northbridges == 0)
-               return;
-
-       this_leaf->can_disable = true;
-       this_leaf->l3_indices  = amd_calc_l3_indices();
-}
-
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
-                                 unsigned int index)
-{
-       int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-       int node = amd_get_nb_id(cpu);
-       struct pci_dev *dev = node_to_k8_nb_misc(node);
-       unsigned int reg = 0;
-
-       if (!this_leaf->can_disable)
-               return -EINVAL;
-
-       if (!dev)
-               return -EINVAL;
-
-       pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
-       return sprintf(buf, "0x%08x\n", reg);
-}
-
-#define SHOW_CACHE_DISABLE(index)                                      \
-static ssize_t                                                         \
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)  \
-{                                                                      \
-       return show_cache_disable(this_leaf, buf, index);               \
-}
-SHOW_CACHE_DISABLE(0)
-SHOW_CACHE_DISABLE(1)
-
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
-       const char *buf, size_t count, unsigned int index)
-{
-       int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-       int node = amd_get_nb_id(cpu);
-       struct pci_dev *dev = node_to_k8_nb_misc(node);
-       unsigned long val = 0;
-
-#define SUBCACHE_MASK  (3UL << 20)
-#define SUBCACHE_INDEX 0xfff
-
-       if (!this_leaf->can_disable)
-               return -EINVAL;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
-       if (!dev)
-               return -EINVAL;
-
-       if (strict_strtoul(buf, 10, &val) < 0)
-               return -EINVAL;
-
-       /* do not allow writes outside of allowed bits */
-       if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
-           ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
-               return -EINVAL;
-
-       val |= BIT(30);
-       pci_write_config_dword(dev, 0x1BC + index * 4, val);
-       /*
-        * We need to WBINVD on a core on the node containing the L3 cache which
-        * indices we disable therefore a simple wbinvd() is not sufficient.
-        */
-       wbinvd_on_cpu(cpu);
-       pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
-       return count;
+       this_leaf->can_disable = 1;
 }
 
-#define STORE_CACHE_DISABLE(index)                                     \
-static ssize_t                                                         \
-store_cache_disable_##index(struct _cpuid4_info *this_leaf,            \
-                           const char *buf, size_t count)              \
-{                                                                      \
-       return store_cache_disable(this_leaf, buf, count, index);       \
-}
-STORE_CACHE_DISABLE(0)
-STORE_CACHE_DISABLE(1)
-
-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
-               show_cache_disable_0, store_cache_disable_0);
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
-               show_cache_disable_1, store_cache_disable_1);
-
-#else  /* CONFIG_CPU_SUP_AMD */
-static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
-{
-};
-#endif /* CONFIG_CPU_SUP_AMD */
-
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
                                   struct _cpuid4_info_regs *this_leaf)
@@ -651,19 +523,18 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
        struct _cpuid4_info     *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
-       int index_msb, i, sibling;
+       int index_msb, i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
-               for_each_cpu(i, c->llc_shared_map) {
+               struct cpuinfo_x86 *d;
+               for_each_online_cpu(i) {
                        if (!per_cpu(cpuid4_info, i))
                                continue;
+                       d = &cpu_data(i);
                        this_leaf = CPUID4_INFO_IDX(i, index);
-                       for_each_cpu(sibling, c->llc_shared_map) {
-                               if (!cpu_online(sibling))
-                                       continue;
-                               set_bit(sibling, this_leaf->shared_cpu_map);
-                       }
+                       cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
+                                    d->llc_shared_map);
                }
                return;
        }
@@ -855,6 +726,82 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
 #define to_object(k)   container_of(k, struct _index_kobject, kobj)
 #define to_attr(a)     container_of(a, struct _cache_attr, attr)
 
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+                                 unsigned int index)
+{
+       int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+       int node = cpu_to_node(cpu);
+       struct pci_dev *dev = node_to_k8_nb_misc(node);
+       unsigned int reg = 0;
+
+       if (!this_leaf->can_disable)
+               return -EINVAL;
+
+       if (!dev)
+               return -EINVAL;
+
+       pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+       return sprintf(buf, "%x\n", reg);
+}
+
+#define SHOW_CACHE_DISABLE(index)                                      \
+static ssize_t                                                         \
+show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)          \
+{                                                                      \
+       return show_cache_disable(this_leaf, buf, index);               \
+}
+SHOW_CACHE_DISABLE(0)
+SHOW_CACHE_DISABLE(1)
+
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+       const char *buf, size_t count, unsigned int index)
+{
+       int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+       int node = cpu_to_node(cpu);
+       struct pci_dev *dev = node_to_k8_nb_misc(node);
+       unsigned long val = 0;
+       unsigned int scrubber = 0;
+
+       if (!this_leaf->can_disable)
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (!dev)
+               return -EINVAL;
+
+       if (strict_strtoul(buf, 10, &val) < 0)
+               return -EINVAL;
+
+       val |= 0xc0000000;
+
+       pci_read_config_dword(dev, 0x58, &scrubber);
+       scrubber &= ~0x1f000000;
+       pci_write_config_dword(dev, 0x58, scrubber);
+
+       pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+       wbinvd();
+       pci_write_config_dword(dev, 0x1BC + index * 4, val);
+       return count;
+}
+
+#define STORE_CACHE_DISABLE(index)                                     \
+static ssize_t                                                         \
+store_cache_disable_##index(struct _cpuid4_info *this_leaf,            \
+                           const char *buf, size_t count)              \
+{                                                                      \
+       return store_cache_disable(this_leaf, buf, count, index);       \
+}
+STORE_CACHE_DISABLE(0)
+STORE_CACHE_DISABLE(1)
+
+struct _cache_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct _cpuid4_info *, char *);
+       ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+};
+
 #define define_one_ro(_name) \
 static struct _cache_attr _name = \
        __ATTR(_name, 0444, show_##_name, NULL)
@@ -869,28 +816,23 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
-#define DEFAULT_SYSFS_CACHE_ATTRS      \
-       &type.attr,                     \
-       &level.attr,                    \
-       &coherency_line_size.attr,      \
-       &physical_line_partition.attr,  \
-       &ways_of_associativity.attr,    \
-       &number_of_sets.attr,           \
-       &size.attr,                     \
-       &shared_cpu_map.attr,           \
-       &shared_cpu_list.attr
+static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+               show_cache_disable_0, store_cache_disable_0);
+static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+               show_cache_disable_1, store_cache_disable_1);
 
 static struct attribute *default_attrs[] = {
-       DEFAULT_SYSFS_CACHE_ATTRS,
-       NULL
-};
-
-static struct attribute *default_l3_attrs[] = {
-       DEFAULT_SYSFS_CACHE_ATTRS,
-#ifdef CONFIG_CPU_SUP_AMD
+       &type.attr,
+       &level.attr,
+       &coherency_line_size.attr,
+       &physical_line_partition.attr,
+       &ways_of_associativity.attr,
+       &number_of_sets.attr,
+       &size.attr,
+       &shared_cpu_map.attr,
+       &shared_cpu_list.attr,
        &cache_disable_0.attr,
        &cache_disable_1.attr,
-#endif
        NULL
 };
 
@@ -981,7 +923,6 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct _index_kobject *this_object;
-       struct _cpuid4_info   *this_leaf;
        int retval;
 
        retval = cpuid4_cache_sysfs_init(cpu);
@@ -1000,14 +941,6 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                this_object = INDEX_KOBJECT_PTR(cpu, i);
                this_object->cpu = cpu;
                this_object->index = i;
-
-               this_leaf = CPUID4_INFO_IDX(cpu, i);
-
-               if (this_leaf->can_disable)
-                       ktype_cache.default_attrs = default_l3_attrs;
-               else
-                       ktype_cache.default_attrs = default_attrs;
-
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache,
                                              per_cpu(cache_kobject, cpu),
index 8387792a696b025a4b34acb3ea90bd50670a4aff..83a3d1f4efca9ff447e0e2b2f902d4c7d4d16248 100644 (file)
@@ -140,7 +140,6 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
-
                                address += MCG_XBLK_ADDR;
                        } else
                                ++address;
@@ -148,8 +147,12 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
                        if (rdmsr_safe(address, &low, &high))
                                break;
 
-                       if (!(high & MASK_VALID_HI))
-                               continue;
+                       if (!(high & MASK_VALID_HI)) {
+                               if (block)
+                                       continue;
+                               else
+                                       break;
+                       }
 
                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
index 650c6a5bdae6afc6e23b985687319fa90fd25a47..73c86db5acbebacbbb5a58d9603237fd916d80f2 100644 (file)
@@ -948,7 +948,7 @@ int __init amd_special_default_mtrr(void)
 
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return 0;
-       if (boot_cpu_data.x86 < 0xf)
+       if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
                return 0;
        /* In case some hypervisor doesn't pass SYSCFG through: */
        if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
index 0ff02cae710b55bc190f3f921972bad704478aed..b5801c311846304f2fc2263732e93a6a83c07217 100644 (file)
@@ -190,97 +190,6 @@ static u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
 
-static const u64 westmere_hw_cache_event_ids
-                               [PERF_COUNT_HW_CACHE_MAX]
-                               [PERF_COUNT_HW_CACHE_OP_MAX]
-                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
-               [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES      */
-               [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
-               [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
-       },
- },
- [ C(L1I ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
-               [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
- },
- [ C(LL  ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
-               [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
-               [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
-               [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
-       },
- },
- [ C(DTLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
-               [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES      */
-               [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
- },
- [ C(ITLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
-               [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
- [ C(BPU ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
-               [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
-};
-
 static const u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -1005,11 +914,8 @@ static int __hw_perf_event_init(struct perf_event *event)
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
-                       else {
+                       else
                                err = reserve_bts_hardware();
-                               if (err)
-                                       release_pmc_hardware();
-                       }
                }
                if (!err)
                        atomic_inc(&active_events);
@@ -2093,7 +1999,6 @@ static int intel_pmu_init(void)
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
-
        case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
        case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
@@ -2104,9 +2009,7 @@ static int intel_pmu_init(void)
                pr_cont("Core2 events, ");
                break;
        default:
-       case 26: /* 45 nm nehalem, "Bloomfield" */
-       case 30: /* 45 nm nehalem, "Lynnfield" */
-       case 46: /* 45 nm nehalem-ex, "Beckton" */
+       case 26:
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -2118,14 +2021,6 @@ static int intel_pmu_init(void)
 
                pr_cont("Atom events, ");
                break;
-
-       case 37: /* 32 nm nehalem, "Clarkdale" */
-       case 44: /* 32 nm nehalem, "Gulftown" */
-               memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
-                      sizeof(hw_cache_event_ids));
-
-               pr_cont("Westmere events, ");
-               break;
        }
        return 0;
 }
index 9580152f581356151fd0a35eaac6d57ed1d3864f..1cbed97b59cf841d8c41b4466ac040cf1c794b03 100644 (file)
@@ -22,7 +22,6 @@
  */
 
 #include <linux/dmi.h>
-#include <linux/jiffies.h>
 #include <asm/div64.h>
 #include <asm/vmware.h>
 #include <asm/x86_init.h>
@@ -51,7 +50,7 @@ static inline int __vmware_platform(void)
 
 static unsigned long vmware_get_tsc_khz(void)
 {
-       uint64_t tsc_hz, lpj;
+       uint64_t tsc_hz;
        uint32_t eax, ebx, ecx, edx;
 
        VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
@@ -62,13 +61,6 @@ static unsigned long vmware_get_tsc_khz(void)
        printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
                         (unsigned long) tsc_hz / 1000,
                         (unsigned long) tsc_hz % 1000);
-
-       if (!preset_lpj) {
-               lpj = ((u64)tsc_hz * 1000);
-               do_div(lpj, HZ);
-               preset_lpj = lpj;
-       }
-
        return tsc_hz;
 }
 
index ff958248e61d7d48965c7b61c3cfdf32bc551f23..5e409dc298a479d3f72e9a65990edb5fedc514da 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
+#include <asm/iommu.h>
 
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
@@ -103,5 +104,10 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #ifdef CONFIG_HPET_TIMER
        hpet_disable();
 #endif
+
+#ifdef CONFIG_X86_64
+       pci_iommu_shutdown();
+#endif
+
        crash_save_cpu(regs, safe_smp_processor_id());
 }
index 994828899e098350d12ca73217235af843b0d497..045b36cada655370382231cb186d45d5d8820d95 100644 (file)
@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        if (!csize)
                return 0;
 
-       vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
+       vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!vaddr)
                return -ENOMEM;
 
@@ -46,7 +46,6 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        } else
                memcpy(buf, vaddr + offset, csize);
 
-       set_iounmap_nonlazy();
        iounmap(vaddr);
        return csize;
 }
index 34c3308730f878dbe1e871bcd0e2ac17eaf0ba9a..050c278481b187b6f76528261ea3ce6540c1d82f 100644 (file)
@@ -324,7 +324,7 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-       movl pa(initial_page_table), %eax
+       movl $pa(swapper_pg_dir),%eax
        movl %eax,%cr3          /* set the page table pointer.. */
        movl %cr0,%eax
        orl  $X86_CR0_PG,%eax
@@ -604,8 +604,6 @@ ignore_int:
 .align 4
 ENTRY(initial_code)
        .long i386_start_kernel
-ENTRY(initial_page_table)
-       .long pa(swapper_pg_dir)
 
 /*
  * BSS section
@@ -621,10 +619,6 @@ ENTRY(swapper_pg_dir)
 #endif
 swapper_pg_fixmap:
        .fill 1024,4,0
-#ifdef CONFIG_X86_TRAMPOLINE
-ENTRY(trampoline_pg_dir)
-       .fill 1024,4,0
-#endif
 ENTRY(empty_zero_page)
        .fill 4096,1,0
 
index c771e1a37b9de8d7b5f81d79dd6cd33f6ab28e72..58778736496a37170a63e56019a040267098d183 100644 (file)
@@ -385,28 +385,11 @@ static int hpet_next_event(unsigned long delta,
        hpet_writel(cnt, HPET_Tn_CMP(timer));
 
        /*
-        * We need to read back the CMP register on certain HPET
-        * implementations (ATI chipsets) which seem to delay the
-        * transfer of the compare register into the internal compare
-        * logic. With small deltas this might actually be too late as
-        * the counter could already be higher than the compare value
-        * at that point and we would wait for the next hpet interrupt
-        * forever. We found out that reading the CMP register back
-        * forces the transfer so we can rely on the comparison with
-        * the counter register below. If the read back from the
-        * compare register does not match the value we programmed
-        * then we might have a real hardware problem. We can not do
-        * much about it here, but at least alert the user/admin with
-        * a prominent warning.
-        * An erratum on some chipsets (ICH9,..), results in comparator read
-        * immediately following a write returning old value. Workaround
-        * for this is to read this value second time, when first
-        * read returns old value.
+        * We need to read back the CMP register to make sure that
+        * what we wrote hit the chip before we compare it to the
+        * counter.
         */
-       if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
-               WARN_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt,
-                 KERN_WARNING "hpet: compare register read back failed.\n");
-       }
+       WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
 
        return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
@@ -497,7 +480,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
 {
        unsigned int irq;
 
-       irq = create_irq_nr(0, -1);
+       irq = create_irq();
        if (!irq)
                return -EINVAL;
 
@@ -949,7 +932,7 @@ fs_initcall(hpet_late_init);
 
 void hpet_disable(void)
 {
-       if (is_hpet_capable() && hpet_virt_address) {
+       if (is_hpet_capable()) {
                unsigned long cfg = hpet_readl(HPET_CFG);
 
                if (hpet_legacy_int_enabled) {
index 9b895464dd0311f9c0c4619b5cf7e3f5e94dd2de..cbc4332a77b25927947d18c69f47188465a29d3e 100644 (file)
@@ -121,17 +121,3 @@ void k8_flush_garts(void)
 }
 EXPORT_SYMBOL_GPL(k8_flush_garts);
 
-static __init int init_k8_nbs(void)
-{
-       int err = 0;
-
-       err = cache_k8_northbridges();
-
-       if (err < 0)
-               printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
-
-       return err;
-}
-
-/* This has to go after the PCI subsystem */
-fs_initcall(init_k8_nbs);
index e07bc4ef282912f3747d94a9402f911dd80198b2..5be95ef4ffec6baa9bd390119d389dceb94793ad 100644 (file)
@@ -359,6 +359,13 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
                x86_init.mpparse.mpc_record(1);
        }
 
+#ifdef CONFIG_X86_BIGSMP
+       generic_bigsmp_probe();
+#endif
+
+       if (apic->setup_apic_routing)
+               apic->setup_apic_routing();
+
        if (!num_processors)
                printk(KERN_ERR "MPTABLE: no processors registered!\n");
        return num_processors;
index 38faf7211d0b128a62394637005ccf3dd01ebf40..4006c522adc780a0ce016b43b2d5a7ae93ca0f14 100644 (file)
@@ -115,7 +115,6 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
        unsigned long flags;
        int ret = -EIO;
        int i;
-       int restarts = 0;
 
        spin_lock_irqsave(&ec_lock, flags);
 
@@ -172,9 +171,7 @@ restart:
                        if (wait_on_obf(0x6c, 1)) {
                                printk(KERN_ERR "olpc-ec:  timeout waiting for"
                                                " EC to provide data!\n");
-                               if (restarts++ < 10)
-                                       goto restart;
-                               goto err;
+                               goto restart;
                        }
                        outbuf[i] = inb(0x68);
                        printk(KERN_DEBUG "olpc-ec:  received 0x%x\n",
index 1a2d4b19eb23380babc97f401355a39d43dd79cf..e6ec8a2df1c368fa3a0cfd5b8db62076b1157c44 100644 (file)
@@ -102,16 +102,11 @@ int use_calgary __read_mostly = 0;
 #define PMR_SOFTSTOPFAULT      0x40000000
 #define PMR_HARDSTOP           0x20000000
 
-/*
- * The maximum PHB bus number.
- * x3950M2 (rare): 8 chassis, 48 PHBs per chassis = 384
- * x3950M2: 4 chassis, 48 PHBs per chassis        = 192
- * x3950 (PCIE): 8 chassis, 32 PHBs per chassis   = 256
- * x3950 (PCIX): 8 chassis, 16 PHBs per chassis   = 128
- */
-#define MAX_PHB_BUS_NUM                256
-
-#define PHBS_PER_CALGARY         4
+#define MAX_NUM_OF_PHBS                8 /* how many PHBs in total? */
+#define MAX_NUM_CHASSIS                8 /* max number of chassis */
+/* MAX_PHB_BUS_NUM is the maximal possible dev->bus->number */
+#define MAX_PHB_BUS_NUM                (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2)
+#define PHBS_PER_CALGARY       4
 
 /* register offsets in Calgary's internal register space */
 static const unsigned long tar_offsets[] = {
@@ -1058,6 +1053,8 @@ static int __init calgary_init_one(struct pci_dev *dev)
        struct iommu_table *tbl;
        int ret;
 
+       BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
+
        bbar = busno_to_bbar(dev->bus->number);
        ret = calgary_setup_tar(dev, bbar);
        if (ret)
index 1c766915094e9babe9ef610388ef2b4775c1ca12..fcc0b5c022c10cbe545c7f781cf25bcdd805e7f8 100644 (file)
@@ -553,9 +553,6 @@ static void enable_gart_translations(void)
 
                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
-
-       /* Flush the GART-TLB to remove stale entries */
-       k8_flush_garts();
 }
 
 /*
@@ -720,7 +717,7 @@ void __init gart_iommu_init(void)
        unsigned long scratch;
        long i;
 
-       if (num_k8_northbridges == 0)
+       if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
                return;
 
 #ifndef CONFIG_AGP_AMD64
index 5fd5b07bf3a5774cd996e312d9fe55533a37b984..f010ab424f1f9ccc03b3f8ed7b4e759609ef733a 100644 (file)
@@ -439,39 +439,21 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 }
 
 /*
- * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
- * For more information see
- * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
- * - Erratum #365 for family 0x11 (not affected because C1e not in use)
+ * Check for AMD CPUs, which have potentially C1E support
  */
 static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 {
-       u64 val;
        if (c->x86_vendor != X86_VENDOR_AMD)
-               goto no_c1e_idle;
+               return 0;
 
-       /* Family 0x0f models < rev F do not have C1E */
-       if (c->x86 == 0x0F && c->x86_model >= 0x40)
-               return 1;
+       if (c->x86 < 0x0F)
+               return 0;
 
-       if (c->x86 == 0x10) {
-               /*
-                * check OSVW bit for CPUs that are not affected
-                * by erratum #400
-                */
-               if (cpu_has(c, X86_FEATURE_OSVW)) {
-                       rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
-                       if (val >= 2) {
-                               rdmsrl(MSR_AMD64_OSVW_STATUS, val);
-                               if (!(val & BIT(1)))
-                                       goto no_c1e_idle;
-                       }
-               }
-               return 1;
-       }
+       /* Family 0x0f models < rev F do not have C1E */
+       if (c->x86 == 0x0f && c->x86_model < 0x40)
+               return 0;
 
-no_c1e_idle:
-       return 0;
+       return 1;
 }
 
 static cpumask_var_t c1e_mask;
index 868fdb407bb90c912c8cb91f8d2df5ad09a90885..f9ce04f610038ca14d91efbedb3a1055b0ac3129 100644 (file)
@@ -295,10 +295,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
        set_tsk_thread_flag(p, TIF_FORK);
 
+       p->thread.fs = me->thread.fs;
+       p->thread.gs = me->thread.gs;
+
        savesegment(gs, p->thread.gsindex);
-       p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
-       p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
 
@@ -545,7 +546,6 @@ void set_personality_ia32(void)
 
        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_IA32);
-       current->personality |= force_personality32;
 
        /* Prepare the first "return" to user space */
        current_thread_info()->status |= TS_COMPAT;
index dfdfe4662e0508073fbc72f946d006c1ac299497..03801f2f761fc312ba80353425c1cc7243a0105f 100644 (file)
@@ -109,14 +109,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
        return pv_tsc_khz;
 }
 
-static atomic64_t last_value = ATOMIC64_INIT(0);
-
 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 {
        struct pvclock_shadow_time shadow;
        unsigned version;
        cycle_t ret, offset;
-       u64 last;
 
        do {
                version = pvclock_get_time_values(&shadow, src);
@@ -126,27 +123,6 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
                barrier();
        } while (version != src->version);
 
-       /*
-        * Assumption here is that last_value, a global accumulator, always goes
-        * forward. If we are less than that, we should not be much smaller.
-        * We assume there is an error marging we're inside, and then the correction
-        * does not sacrifice accuracy.
-        *
-        * For reads: global may have changed between test and return,
-        * but this means someone else updated poked the clock at a later time.
-        * We just need to make sure we are not seeing a backwards event.
-        *
-        * For updates: last_value = ret is not enough, since two vcpus could be
-        * updating at the same time, and one of them could be slightly behind,
-        * making the assumption that last_value always go forward fail to hold.
-        */
-       last = atomic64_read(&last_value);
-       do {
-               if (ret < last)
-                       return last;
-               last = atomic64_cmpxchg(&last_value, last, ret);
-       } while (unlikely(last != ret));
-
        return ret;
 }
 
index 12e9feaa2f7aba947b65ca2fe3611be81321a8f1..0040164f1a8253c4832ace5e7908c77a73a031fa 100644 (file)
@@ -512,7 +512,6 @@ static void __init quirk_amd_nb_node(struct pci_dev *dev)
 {
        struct pci_dev *nb_ht;
        unsigned int devfn;
-       u32 node;
        u32 val;
 
        devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
@@ -521,13 +520,7 @@ static void __init quirk_amd_nb_node(struct pci_dev *dev)
                return;
 
        pci_read_config_dword(nb_ht, 0x60, &val);
-       node = val & 7;
-       /*
-        * Some hardware may return an invalid node ID,
-        * so check it first:
-        */
-       if (node_online(node))
-               set_dev_node(&dev->dev, node);
+       set_dev_node(&dev->dev, val & 7);
        pci_dev_put(nb_ht);
 }
 
index 200fcde41aa24de89cd5a90021b7b7eb3b18866a..bff34d68d9d1df826310d4296828e96b8000868d 100644 (file)
@@ -461,14 +461,6 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
                },
        },
-       {       /* Handle problems with rebooting on the iMac9,1. */
-               .callback = set_pci_reboot,
-               .ident = "Apple iMac9,1",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
-               },
-       },
        { }
 };
 
@@ -633,7 +625,7 @@ void native_machine_shutdown(void)
        /* O.K Now that I'm on the appropriate processor,
         * stop all of the others.
         */
-       stop_other_cpus();
+       smp_send_stop();
 #endif
 
        lapic_shutdown();
index 5449a26982428bbd092868c89ce621fb4babd666..8425f7ebe89e699a15316070f4068052977f25d6 100644 (file)
 #include <asm/numa_64.h>
 #endif
 #include <asm/mce.h>
-#include <asm/trampoline.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -689,17 +688,6 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
                },
        },
-       /*
-        * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
-        * match on the product name.
-        */
-       {
-               .callback = dmi_low_memory_corruption,
-               .ident = "Phoenix BIOS",
-               .matches = {
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
-               },
-       },
 #endif
        {}
 };
@@ -999,8 +987,6 @@ void __init setup_arch(char **cmdline_p)
        paging_init();
        x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
-       setup_trampoline_page_table();
-
        tboot_probe();
 
 #ifdef CONFIG_X86_64
index 29f0a78ec88743a59f29770d067fc4880ec36656..ec1de97600e70638bcfdcb53da52a83bc1288829 100644 (file)
@@ -158,10 +158,10 @@ asmlinkage void smp_reboot_interrupt(void)
        irq_exit();
 }
 
-static void native_stop_other_cpus(int wait)
+static void native_smp_send_stop(void)
 {
        unsigned long flags;
-       unsigned long timeout;
+       unsigned long wait;
 
        if (reboot_force)
                return;
@@ -178,12 +178,9 @@ static void native_stop_other_cpus(int wait)
        if (num_online_cpus() > 1) {
                apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-               /*
-                * Don't wait longer than a second if the caller
-                * didn't ask us to wait.
-                */
-               timeout = USEC_PER_SEC;
-               while (num_online_cpus() > 1 && (wait || timeout--))
+               /* Don't wait longer than a second */
+               wait = USEC_PER_SEC;
+               while (num_online_cpus() > 1 && wait--)
                        udelay(1);
        }
 
@@ -229,7 +226,7 @@ struct smp_ops smp_ops = {
        .smp_prepare_cpus       = native_smp_prepare_cpus,
        .smp_cpus_done          = native_smp_cpus_done,
 
-       .stop_other_cpus        = native_stop_other_cpus,
+       .smp_send_stop          = native_smp_send_stop,
        .smp_send_reschedule    = native_smp_send_reschedule,
 
        .cpu_up                 = native_cpu_up,
index 7e8e905e2ccb98c1fe79c11fda27d122420401c5..565ebc65920e3e685161758acb03c4f8106c6b40 100644 (file)
@@ -70,6 +70,7 @@
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
+static int low_mappings;
 #endif
 
 /* State of each CPU */
@@ -87,25 +88,6 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
-
-/*
- * We need this for trampoline_base protection from concurrent accesses when
- * off- and onlining cores wildly.
- */
-static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock()
-{
-        mutex_lock(&x86_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock()
-{
-        mutex_unlock(&x86_cpu_hotplug_driver_mutex);
-}
-
-ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
-ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x)      (idle_thread_array[(x)])
@@ -291,18 +273,6 @@ notrace static void __cpuinit start_secondary(void *unused)
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
-
-#ifdef CONFIG_X86_32
-       /*
-        * Switch away from the trampoline page-table
-        *
-        * Do this before cpu_init() because it needs to access per-cpu
-        * data which may not be mapped in the trampoline page-table.
-        */
-       load_cr3(swapper_pg_dir);
-       __flush_tlb_all();
-#endif
-
        vmi_bringup();
        cpu_init();
        preempt_disable();
@@ -321,6 +291,12 @@ notrace static void __cpuinit start_secondary(void *unused)
                enable_8259A_irq(0);
        }
 
+#ifdef CONFIG_X86_32
+       while (low_mappings)
+               cpu_relax();
+       __flush_tlb_all();
+#endif
+
        /* This must be done before setting cpu_online_mask */
        set_cpu_sibling_map(raw_smp_processor_id());
        wmb();
@@ -746,7 +722,6 @@ do_rest:
 #ifdef CONFIG_X86_32
        /* Stack for startup_32 can be just as for start_secondary onwards */
        irq_ctx_init(cpu);
-       initial_page_table = __pa(&trampoline_pg_dir);
 #else
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
        initial_gs = per_cpu_offset(cpu);
@@ -891,8 +866,20 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
+#ifdef CONFIG_X86_32
+       /* init low mem mapping */
+       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+               min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+       flush_tlb_all();
+       low_mappings = 1;
+
        err = do_boot_cpu(apicid, cpu);
 
+       zap_low_mappings(false);
+       low_mappings = 0;
+#else
+       err = do_boot_cpu(apicid, cpu);
+#endif
        if (err) {
                pr_debug("do_boot_cpu failed %d\n", err);
                return -EIO;
@@ -1079,7 +1066,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        set_cpu_sibling_map(0);
 
        enable_IR_x2apic();
+#ifdef CONFIG_X86_64
        default_setup_apic_routing();
+#endif
 
        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
index 46b827778d16601dab7a0ae3306a25d3c4812fbc..86c9f91b48aea8ef47f0e77372fcf2d5f822d77e 100644 (file)
@@ -46,7 +46,6 @@
 
 /* Global pointer to shared data; NULL means no measured launch. */
 struct tboot *tboot __read_mostly;
-EXPORT_SYMBOL(tboot);
 
 /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
 #define AP_WAIT_TIMEOUT                1
index 0ac23a7bf6f162edd191e53c61fe2b0cf1da3a17..cd022121cab611629ec9b04de51677e44bef3567 100644 (file)
@@ -1,7 +1,6 @@
 #include <linux/io.h>
 
 #include <asm/trampoline.h>
-#include <asm/pgtable.h>
 #include <asm/e820.h>
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -40,19 +39,3 @@ unsigned long __trampinit setup_trampoline(void)
        memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
        return virt_to_phys(trampoline_base);
 }
-
-void __init setup_trampoline_page_table(void)
-{
-#ifdef CONFIG_X86_32
-       /* Copy kernel address range */
-       clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
-                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                       KERNEL_PGD_PTRS);
-
-       /* Initialize low mappings */
-       clone_pgd_range(trampoline_pg_dir,
-                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                       min_t(unsigned long, KERNEL_PGD_PTRS,
-                             KERNEL_PGD_BOUNDARY));
-#endif
-}
index aaefa71888cbedde0947060496a38b4cbf63103b..597683aa5ba0ba3ef800ab39179669ded4e3aa57 100644 (file)
@@ -626,44 +626,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
        local_irq_restore(flags);
 }
 
-static unsigned long long cyc2ns_suspend;
-
-void save_sched_clock_state(void)
-{
-       if (!sched_clock_stable)
-               return;
-
-       cyc2ns_suspend = sched_clock();
-}
-
-/*
- * Even on processors with invariant TSC, TSC gets reset in some the
- * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to
- * arbitrary value (still sync'd across cpu's) during resume from such sleep
- * states. To cope up with this, recompute the cyc2ns_offset for each cpu so
- * that sched_clock() continues from the point where it was left off during
- * suspend.
- */
-void restore_sched_clock_state(void)
-{
-       unsigned long long offset;
-       unsigned long flags;
-       int cpu;
-
-       if (!sched_clock_stable)
-               return;
-
-       local_irq_save(flags);
-
-       __get_cpu_var(cyc2ns_offset) = 0;
-       offset = cyc2ns_suspend - sched_clock();
-
-       for_each_possible_cpu(cpu)
-               per_cpu(cyc2ns_offset, cpu) = offset;
-
-       local_irq_restore(flags);
-}
-
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
index 62f39d79b7754557f0725d5b80b958e5d7c245d8..8cb4974ff5990c19267077d473caeeb504fe5bae 100644 (file)
@@ -73,8 +73,7 @@ void update_vsyscall_tz(void)
        write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
-                    u32 mult)
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
 {
        unsigned long flags;
 
@@ -83,7 +82,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
        vsyscall_gtod_data.clock.vread = clock->vread;
        vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
        vsyscall_gtod_data.clock.mask = clock->mask;
-       vsyscall_gtod_data.clock.mult = mult;
+       vsyscall_gtod_data.clock.mult = clock->mult;
        vsyscall_gtod_data.clock.shift = clock->shift;
        vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
        vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
index 1350e43cc02fa8fe298b3c607a9cdf68bf67f9b7..e02dbb670d5b56b826cf9394b25300b2dc49c54e 100644 (file)
@@ -75,7 +75,6 @@
 #define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
 #define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
 #define GroupMask   0xff        /* Group number stored in bits 0:7 */
-#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
 /* Source 2 operand type */
 #define Src2None    (0<<29)
 #define Src2CL      (1<<29)
@@ -87,7 +86,6 @@
 enum {
        Group1_80, Group1_81, Group1_82, Group1_83,
        Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
-       Group8, Group9,
 };
 
 static u32 opcode_table[256] = {
@@ -205,7 +203,7 @@ static u32 opcode_table[256] = {
        SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
        /* 0xF0 - 0xF7 */
        0, 0, 0, 0,
-       ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
+       ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3,
        /* 0xF8 - 0xFF */
        ImplicitOps, 0, ImplicitOps, ImplicitOps,
        ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
@@ -213,20 +211,16 @@ static u32 opcode_table[256] = {
 
 static u32 twobyte_table[256] = {
        /* 0x00 - 0x0F */
-       0, Group | GroupDual | Group7, 0, 0,
-       0, ImplicitOps, ImplicitOps | Priv, 0,
-       ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
-       0, ImplicitOps | ModRM, 0, 0,
+       0, Group | GroupDual | Group7, 0, 0, 0, ImplicitOps, ImplicitOps, 0,
+       ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
        /* 0x10 - 0x1F */
        0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
        /* 0x20 - 0x2F */
-       ModRM | ImplicitOps | Priv, ModRM | Priv,
-       ModRM | ImplicitOps | Priv, ModRM | Priv,
-       0, 0, 0, 0,
+       ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        /* 0x30 - 0x3F */
-       ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
-       ImplicitOps, ImplicitOps | Priv, 0, 0,
+       ImplicitOps, 0, ImplicitOps, 0,
+       ImplicitOps, ImplicitOps, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        /* 0x40 - 0x47 */
        DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
@@ -264,12 +258,11 @@ static u32 twobyte_table[256] = {
        0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
            DstReg | SrcMem16 | ModRM | Mov,
        /* 0xB8 - 0xBF */
-       0, 0, Group | Group8, DstMem | SrcReg | ModRM | BitOp,
+       0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM | BitOp,
        0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
            DstReg | SrcMem16 | ModRM | Mov,
        /* 0xC0 - 0xCF */
-       0, 0, 0, DstMem | SrcReg | ModRM | Mov,
-       0, 0, 0, Group | GroupDual | Group9,
+       0, 0, 0, DstMem | SrcReg | ModRM | Mov, 0, 0, 0, ImplicitOps | ModRM,
        0, 0, 0, 0, 0, 0, 0, 0,
        /* 0xD0 - 0xDF */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -318,39 +311,24 @@ static u32 group_table[] = {
        SrcMem | ModRM | Stack, 0,
        SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
        [Group7*8] =
-       0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
+       0, 0, ModRM | SrcMem, ModRM | SrcMem,
        SrcNone | ModRM | DstMem | Mov, 0,
-       SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
-       [Group8*8] =
-       0, 0, 0, 0,
-       DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
-       DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
-       [Group9*8] =
-       0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0,
+       SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp,
 };
 
 static u32 group2_table[] = {
        [Group7*8] =
-       SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM,
+       SrcNone | ModRM, 0, 0, SrcNone | ModRM,
        SrcNone | ModRM | DstMem | Mov, 0,
        SrcMem16 | ModRM | Mov, 0,
-       [Group9*8] =
-       0, 0, 0, 0, 0, 0, 0, 0,
 };
 
 /* EFLAGS bit definitions. */
-#define EFLG_ID (1<<21)
-#define EFLG_VIP (1<<20)
-#define EFLG_VIF (1<<19)
-#define EFLG_AC (1<<18)
 #define EFLG_VM (1<<17)
 #define EFLG_RF (1<<16)
-#define EFLG_IOPL (3<<12)
-#define EFLG_NT (1<<14)
 #define EFLG_OF (1<<11)
 #define EFLG_DF (1<<10)
 #define EFLG_IF (1<<9)
-#define EFLG_TF (1<<8)
 #define EFLG_SF (1<<7)
 #define EFLG_ZF (1<<6)
 #define EFLG_AF (1<<4)
@@ -619,7 +597,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 
        if (linear < fc->start || linear >= fc->end) {
                size = min(15UL, PAGE_SIZE - offset_in_page(linear));
-               rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
+               rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
                if (rc)
                        return rc;
                fc->start = linear;
@@ -674,11 +652,11 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                op_bytes = 3;
        *address = 0;
        rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
-                          ctxt->vcpu, NULL);
+                          ctxt->vcpu);
        if (rc)
                return rc;
        rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
-                          ctxt->vcpu, NULL);
+                          ctxt->vcpu);
        return rc;
 }
 
@@ -902,7 +880,6 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 
        switch (mode) {
        case X86EMUL_MODE_REAL:
-       case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
                def_op_bytes = def_ad_bytes = 2;
                break;
@@ -1212,49 +1189,6 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
        return rc;
 }
 
-static int emulate_popf(struct x86_emulate_ctxt *ctxt,
-                      struct x86_emulate_ops *ops,
-                      void *dest, int len)
-{
-       int rc;
-       unsigned long val, change_mask;
-       int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-       int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
-
-       rc = emulate_pop(ctxt, ops, &val, len);
-       if (rc != X86EMUL_CONTINUE)
-               return rc;
-
-       change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
-               | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
-
-       switch(ctxt->mode) {
-       case X86EMUL_MODE_PROT64:
-       case X86EMUL_MODE_PROT32:
-       case X86EMUL_MODE_PROT16:
-               if (cpl == 0)
-                       change_mask |= EFLG_IOPL;
-               if (cpl <= iopl)
-                       change_mask |= EFLG_IF;
-               break;
-       case X86EMUL_MODE_VM86:
-               if (iopl < 3) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
-                       return X86EMUL_PROPAGATE_FAULT;
-               }
-               change_mask |= EFLG_IF;
-               break;
-       default: /* real mode */
-               change_mask |= (EFLG_IOPL | EFLG_IF);
-               break;
-       }
-
-       *(unsigned long *)dest =
-               (ctxt->eflags & ~change_mask) | (val & change_mask);
-
-       return rc;
-}
-
 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
                                struct x86_emulate_ops *ops)
 {
@@ -1396,7 +1330,7 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
        rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
        if (rc)
                return rc;
-       rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
+       rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS);
        return rc;
 }
 
@@ -1504,7 +1438,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
 
        /* syscall is not available in real mode */
        if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
-           || ctxt->mode == X86EMUL_MODE_VM86)
+               || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
                return -1;
 
        setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1556,8 +1490,9 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
        if (c->lock_prefix)
                return -1;
 
-       /* inject #GP if in real mode */
-       if (ctxt->mode == X86EMUL_MODE_REAL) {
+       /* inject #GP if in real mode or paging is disabled */
+       if (ctxt->mode == X86EMUL_MODE_REAL ||
+               !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
                kvm_inject_gp(ctxt->vcpu, 0);
                return -1;
        }
@@ -1621,9 +1556,15 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
        if (c->lock_prefix)
                return -1;
 
-       /* inject #GP if in real mode or Virtual 8086 mode */
-       if (ctxt->mode == X86EMUL_MODE_REAL ||
-           ctxt->mode == X86EMUL_MODE_VM86) {
+       /* inject #GP if in real mode or paging is disabled */
+       if (ctxt->mode == X86EMUL_MODE_REAL
+               || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+               kvm_inject_gp(ctxt->vcpu, 0);
+               return -1;
+       }
+
+       /* sysexit must be called from CPL 0 */
+       if (kvm_x86_ops->get_cpl(ctxt->vcpu) != 0) {
                kvm_inject_gp(ctxt->vcpu, 0);
                return -1;
        }
@@ -1670,57 +1611,6 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
        return 0;
 }
 
-static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
-{
-       int iopl;
-       if (ctxt->mode == X86EMUL_MODE_REAL)
-               return false;
-       if (ctxt->mode == X86EMUL_MODE_VM86)
-               return true;
-       iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-       return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl;
-}
-
-static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
-                                           struct x86_emulate_ops *ops,
-                                           u16 port, u16 len)
-{
-       struct kvm_segment tr_seg;
-       int r;
-       u16 io_bitmap_ptr;
-       u8 perm, bit_idx = port & 0x7;
-       unsigned mask = (1 << len) - 1;
-
-       kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
-       if (tr_seg.unusable)
-               return false;
-       if (tr_seg.limit < 103)
-               return false;
-       r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
-                         NULL);
-       if (r != X86EMUL_CONTINUE)
-               return false;
-       if (io_bitmap_ptr + port/8 > tr_seg.limit)
-               return false;
-       r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
-                         ctxt->vcpu, NULL);
-       if (r != X86EMUL_CONTINUE)
-               return false;
-       if ((perm >> bit_idx) & mask)
-               return false;
-       return true;
-}
-
-static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
-                                struct x86_emulate_ops *ops,
-                                u16 port, u16 len)
-{
-       if (emulator_bad_iopl(ctxt))
-               if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
-                       return false;
-       return true;
-}
-
 int
 x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
@@ -1742,12 +1632,6 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
        memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
        saved_eip = c->eip;
 
-       /* Privileged instruction can be executed only in CPL=0 */
-       if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) {
-               kvm_inject_gp(ctxt->vcpu, 0);
-               goto done;
-       }
-
        if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
                memop = c->modrm_ea;
 
@@ -1880,12 +1764,7 @@ special_insn:
                break;
        case 0x6c:              /* insb */
        case 0x6d:              /* insw/insd */
-               if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
-                                         (c->d & ByteOp) ? 1 : c->op_bytes)) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
-                       goto done;
-               }
-               if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
+                if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
                                1,
                                (c->d & ByteOp) ? 1 : c->op_bytes,
                                c->rep_prefix ?
@@ -1901,11 +1780,6 @@ special_insn:
                return 0;
        case 0x6e:              /* outsb */
        case 0x6f:              /* outsw/outsd */
-               if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
-                                         (c->d & ByteOp) ? 1 : c->op_bytes)) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
-                       goto done;
-               }
                if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
                                0,
                                (c->d & ByteOp) ? 1 : c->op_bytes,
@@ -1992,19 +1866,25 @@ special_insn:
                break;
        case 0x8e: { /* mov seg, r/m16 */
                uint16_t sel;
+               int type_bits;
+               int err;
 
                sel = c->src.val;
-
-               if (c->modrm_reg == VCPU_SREG_CS ||
-                   c->modrm_reg > VCPU_SREG_GS) {
-                       kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
-                       goto done;
-               }
-
                if (c->modrm_reg == VCPU_SREG_SS)
                        toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
 
-               rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
+               if (c->modrm_reg <= 5) {
+                       type_bits = (c->modrm_reg == 1) ? 9 : 1;
+                       err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
+                                                         type_bits, c->modrm_reg);
+               } else {
+                       printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
+                                       c->modrm);
+                       goto cannot_emulate;
+               }
+
+               if (err < 0)
+                       goto cannot_emulate;
 
                c->dst.type = OP_NONE;  /* Disable writeback. */
                break;
@@ -2033,10 +1913,7 @@ special_insn:
                c->dst.type = OP_REG;
                c->dst.ptr = (unsigned long *) &ctxt->eflags;
                c->dst.bytes = c->op_bytes;
-               rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
-               if (rc != X86EMUL_CONTINUE)
-                       goto done;
-               break;
+               goto pop_instruction;
        case 0xa0 ... 0xa1:     /* mov */
                c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
                c->dst.val = c->src.val;
@@ -2174,9 +2051,11 @@ special_insn:
        case 0xe9: /* jmp rel */
                goto jmp;
        case 0xea: /* jmp far */
-               if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
-                                               VCPU_SREG_CS))
-                       goto done;
+               if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, 9,
+                                       VCPU_SREG_CS) < 0) {
+                       DPRINTF("jmp far: Failed to load CS descriptor\n");
+                       goto cannot_emulate;
+               }
 
                c->eip = c->src.val;
                break;
@@ -2194,13 +2073,7 @@ special_insn:
        case 0xef: /* out (e/r)ax,dx */
                port = c->regs[VCPU_REGS_RDX];
                io_dir_in = 0;
-       do_io:
-               if (!emulator_io_permited(ctxt, ops, port,
-                                         (c->d & ByteOp) ? 1 : c->op_bytes)) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
-                       goto done;
-               }
-               if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in,
+       do_io:  if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in,
                                   (c->d & ByteOp) ? 1 : c->op_bytes,
                                   port) != 0) {
                        c->eip = saved_eip;
@@ -2225,21 +2098,13 @@ special_insn:
                c->dst.type = OP_NONE;  /* Disable writeback. */
                break;
        case 0xfa: /* cli */
-               if (emulator_bad_iopl(ctxt))
-                       kvm_inject_gp(ctxt->vcpu, 0);
-               else {
-                       ctxt->eflags &= ~X86_EFLAGS_IF;
-                       c->dst.type = OP_NONE;  /* Disable writeback. */
-               }
+               ctxt->eflags &= ~X86_EFLAGS_IF;
+               c->dst.type = OP_NONE;  /* Disable writeback. */
                break;
        case 0xfb: /* sti */
-               if (emulator_bad_iopl(ctxt))
-                       kvm_inject_gp(ctxt->vcpu, 0);
-               else {
-                       toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
-                       ctxt->eflags |= X86_EFLAGS_IF;
-                       c->dst.type = OP_NONE;  /* Disable writeback. */
-               }
+               toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
+               ctxt->eflags |= X86_EFLAGS_IF;
+               c->dst.type = OP_NONE;  /* Disable writeback. */
                break;
        case 0xfc: /* cld */
                ctxt->eflags &= ~EFLG_DF;
index fdf2e28f3bc6080b0a219039bb6472c8c8ee2d70..3a01519a49f2033074d6b8e1fb7c7cc415e34936 100644 (file)
@@ -136,6 +136,12 @@ module_param(oos_shadow, bool, 0644);
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
                        | PT64_NX_MASK)
 
+#define PFERR_PRESENT_MASK (1U << 0)
+#define PFERR_WRITE_MASK (1U << 1)
+#define PFERR_USER_MASK (1U << 2)
+#define PFERR_RSVD_MASK (1U << 3)
+#define PFERR_FETCH_MASK (1U << 4)
+
 #define PT_PDPE_LEVEL 3
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
@@ -221,7 +227,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static bool is_write_protection(struct kvm_vcpu *vcpu)
+static int is_write_protection(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.cr0 & X86_CR0_WP;
 }
@@ -1496,8 +1502,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
                for_each_sp(pages, sp, parents, i) {
                        kvm_mmu_zap_page(kvm, sp);
                        mmu_pages_clear_parents(&parents);
-                       zapped++;
                }
+               zapped += pages.nr;
                kvm_mmu_pages_init(parent, &parents, &pages);
        }
 
@@ -1548,16 +1554,14 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
         */
 
        if (used_pages > kvm_nr_mmu_pages) {
-               while (used_pages > kvm_nr_mmu_pages &&
-                       !list_empty(&kvm->arch.active_mmu_pages)) {
+               while (used_pages > kvm_nr_mmu_pages) {
                        struct kvm_mmu_page *page;
 
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
-                       used_pages -= kvm_mmu_zap_page(kvm, page);
+                       kvm_mmu_zap_page(kvm, page);
                        used_pages--;
                }
-               kvm_nr_mmu_pages = used_pages;
                kvm->arch.n_free_mmu_pages = 0;
        }
        else
@@ -1604,8 +1608,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
                    && !sp->role.invalid) {
                        pgprintk("%s: zap %lx %x\n",
                                 __func__, gfn, sp->role.word);
-                       if (kvm_mmu_zap_page(kvm, sp))
-                               nn = bucket->first;
+                       kvm_mmu_zap_page(kvm, sp);
                }
        }
 }
@@ -1636,7 +1639,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct page *page;
 
-       gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
+       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
        if (gpa == UNMAPPED_GVA)
                return NULL;
@@ -1843,9 +1846,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
                spte |= PT_WRITABLE_MASK;
 
-               if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
-                       spte &= ~PT_USER_MASK;
-
                /*
                 * Optimization: for pte sync, if spte was writable the hash
                 * lookup is unnecessary (and expensive). Write protection
@@ -1901,8 +1901,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, sptep);
-                       __set_spte(sptep, shadow_trap_nonpresent_pte);
-                       kvm_flush_remote_tlbs(vcpu->kvm);
                } else if (pfn != spte_to_pfn(*sptep)) {
                        pgprintk("hfn old %lx new %lx\n",
                                 spte_to_pfn(*sptep), pfn);
@@ -2096,13 +2094,11 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        direct = 1;
                if (mmu_check_root(vcpu, root_gfn))
                        return 1;
-               spin_lock(&vcpu->kvm->mmu_lock);
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                      PT64_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
-               spin_unlock(&vcpu->kvm->mmu_lock);
                vcpu->arch.mmu.root_hpa = root;
                return 0;
        }
@@ -2124,14 +2120,11 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        root_gfn = 0;
                if (mmu_check_root(vcpu, root_gfn))
                        return 1;
-               spin_lock(&vcpu->kvm->mmu_lock);
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
-               spin_unlock(&vcpu->kvm->mmu_lock);
-
                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
@@ -2169,11 +2162,8 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
-                                 u32 access, u32 *error)
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
-       if (error)
-               *error = 0;
        return vaddr;
 }
 
@@ -2455,7 +2445,6 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
                r = paging32_init_context(vcpu);
 
        vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
-       vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
 
        return r;
 }
@@ -2495,9 +2484,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
                goto out;
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
-       spin_unlock(&vcpu->kvm->mmu_lock);
        r = mmu_alloc_roots(vcpu);
-       spin_lock(&vcpu->kvm->mmu_lock);
        mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (r)
@@ -2760,7 +2747,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
        if (tdp_enabled)
                return 0;
 
-       gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
        spin_lock(&vcpu->kvm->mmu_lock);
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -3258,7 +3245,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
                        audit_mappings_page(vcpu, ent, va, level - 1);
                else {
-                       gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
+                       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
                        gfn_t gfn = gpa >> PAGE_SHIFT;
                        pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
                        hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
index bac752946368401322556902000ac6717622bc5c..61a1b3884b4954b1173cc5c21aced330acb2c938 100644 (file)
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
-
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
index 3bc270766f474452e05c78bafdae77bfb2025490..5fa33255348c9fb010191f41cf987e1c4406f917 100644 (file)
@@ -318,32 +318,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                        break;
                }
 
-               if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
-                       struct kvm_mmu_page *child;
-                       unsigned direct_access;
-
-                       if (level != gw->level)
-                               continue;
-
-                       /*
-                        * For the direct sp, if the guest pte's dirty bit
-                        * changed form clean to dirty, it will corrupt the
-                        * sp's access: allow writable in the read-only sp,
-                        * so we should update the spte at this point to get
-                        * a new sp with the correct access.
-                        */
-                       direct_access = gw->pt_access & gw->pte_access;
-                       if (!is_dirty_gpte(gw->ptes[gw->level - 1]))
-                               direct_access &= ~ACC_WRITE_MASK;
-
-                       child = page_header(*sptep & PT64_BASE_ADDR_MASK);
-                       if (child->role.access == direct_access)
-                               continue;
-
-                       mmu_page_remove_parent_pte(child, sptep);
-                       __set_spte(sptep, shadow_trap_nonpresent_pte);
-                       kvm_flush_remote_tlbs(vcpu->kvm);
-               }
+               if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+                       continue;
 
                if (is_large_pte(*sptep)) {
                        rmap_remove(vcpu->kvm, sptep);
@@ -360,7 +336,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                        /* advance table_gfn when emulating 1gb pages with 4k */
                        if (delta == 0)
                                table_gfn += PT_INDEX(addr, level);
-                       access &= gw->pte_access;
                } else {
                        direct = 0;
                        table_gfn = gw->table_gfn[level - 2];
@@ -516,23 +491,18 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
-                              u32 *error)
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;
 
-       r = FNAME(walk_addr)(&walker, vcpu, vaddr,
-                            !!(access & PFERR_WRITE_MASK),
-                            !!(access & PFERR_USER_MASK),
-                            !!(access & PFERR_FETCH_MASK));
+       r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
 
        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
-       } else if (error)
-               *error = walker.error_code;
+       }
 
        return gpa;
 }
index 253153d2e3e2b18b0c448b71170afd9ea63220eb..c17404add91febfd66d12570057a12695e972d9b 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/sched.h>
 #include <linux/ftrace_event.h>
 
-#include <asm/tlbflush.h>
 #include <asm/desc.h>
 
 #include <asm/virtext.h>
@@ -63,8 +62,6 @@ MODULE_LICENSE("GPL");
 #define nsvm_printk(fmt, args...) do {} while(0)
 #endif
 
-static bool erratum_383_found __read_mostly;
-
 static const u32 host_save_user_msrs[] = {
 #ifdef CONFIG_X86_64
        MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
@@ -302,31 +299,6 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        svm_set_interrupt_shadow(vcpu, 0);
 }
 
-static void svm_init_erratum_383(void)
-{
-       u32 low, high;
-       int err;
-       u64 val;
-
-       /* Only Fam10h is affected */
-       if (boot_cpu_data.x86 != 0x10)
-               return;
-
-       /* Use _safe variants to not break nested virtualization */
-       val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
-       if (err)
-               return;
-
-       val |= (1ULL << 47);
-
-       low  = lower_32_bits(val);
-       high = upper_32_bits(val);
-
-       native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
-
-       erratum_383_found = true;
-}
-
 static int has_svm(void)
 {
        const char *msg;
@@ -346,6 +318,7 @@ static void svm_hardware_disable(void *garbage)
 
 static void svm_hardware_enable(void *garbage)
 {
+
        struct svm_cpu_data *svm_data;
        uint64_t efer;
        struct descriptor_table gdt_descr;
@@ -377,10 +350,6 @@ static void svm_hardware_enable(void *garbage)
 
        wrmsrl(MSR_VM_HSAVE_PA,
               page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
-
-       svm_init_erratum_383();
-
-       return;
 }
 
 static void svm_cpu_uninit(int cpu)
@@ -621,6 +590,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 
        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __pa(svm->msrpm);
+       control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;
 
        init_seg(&save->es);
@@ -655,12 +625,11 @@ static void init_vmcb(struct vcpu_svm *svm)
        save->rip = 0x0000fff0;
        svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
 
-       /* This is the guest-visible cr0 value.
-        * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
+       /*
+        * cr0 val on cpu init should be 0x60000010, we enable cpu
+        * cache by default. the orderly way is to enable cache in bios.
         */
-       svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
-       kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
-
+       save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */
 
@@ -724,28 +693,29 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_svm;
 
-       err = -ENOMEM;
        page = alloc_page(GFP_KERNEL);
-       if (!page)
+       if (!page) {
+               err = -ENOMEM;
                goto uninit;
+       }
 
+       err = -ENOMEM;
        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
-               goto free_page1;
+               goto uninit;
 
        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
-               goto free_page2;
+               goto uninit;
+
+       svm->msrpm = page_address(msrpm_pages);
+       svm_vcpu_init_msrpm(svm->msrpm);
 
        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
-               goto free_page3;
-
+               goto uninit;
        svm->nested.hsave = page_address(hsave_page);
 
-       svm->msrpm = page_address(msrpm_pages);
-       svm_vcpu_init_msrpm(svm->msrpm);
-
        svm->nested.msrpm = page_address(nested_msrpm_pages);
 
        svm->vmcb = page_address(page);
@@ -753,7 +723,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        init_vmcb(svm);
-       svm->vmcb->control.tsc_offset = 0-native_read_tsc();
 
        fx_init(&svm->vcpu);
        svm->vcpu.fpu_active = 1;
@@ -763,12 +732,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
        return &svm->vcpu;
 
-free_page3:
-       __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
-free_page2:
-       __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
-free_page1:
-       __free_page(page);
 uninit:
        kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
@@ -795,18 +758,17 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        int i;
 
        if (unlikely(cpu != vcpu->cpu)) {
-               u64 delta;
-
-               if (check_tsc_unstable()) {
-                       /*
-                        * Make sure that the guest sees a monotonically
-                        * increasing TSC.
-                        */
-                       delta = vcpu->arch.host_tsc - native_read_tsc();
-                       svm->vmcb->control.tsc_offset += delta;
-                       if (is_nested(svm))
-                               svm->nested.hsave->control.tsc_offset += delta;
-               }
+               u64 tsc_this, delta;
+
+               /*
+                * Make sure that the guest sees a monotonically
+                * increasing TSC.
+                */
+               rdtscll(tsc_this);
+               delta = vcpu->arch.host_tsc - tsc_this;
+               svm->vmcb->control.tsc_offset += delta;
+               if (is_nested(svm))
+                       svm->nested.hsave->control.tsc_offset += delta;
                vcpu->cpu = cpu;
                kvm_migrate_timers(vcpu);
                svm->asid_generation = 0;
@@ -1289,59 +1251,8 @@ static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
        return 1;
 }
 
-static bool is_erratum_383(void)
-{
-       int err, i;
-       u64 value;
-
-       if (!erratum_383_found)
-               return false;
-
-       value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
-       if (err)
-               return false;
-
-       /* Bit 62 may or may not be set for this mce */
-       value &= ~(1ULL << 62);
-
-       if (value != 0xb600000000010015ULL)
-               return false;
-
-       /* Clear MCi_STATUS registers */
-       for (i = 0; i < 6; ++i)
-               native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
-
-       value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
-       if (!err) {
-               u32 low, high;
-
-               value &= ~(1ULL << 2);
-               low    = lower_32_bits(value);
-               high   = upper_32_bits(value);
-
-               native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
-       }
-
-       /* Flush tlb to evict multi-match entries */
-       __flush_tlb_all();
-
-       return true;
-}
-
-static void svm_handle_mce(struct vcpu_svm *svm)
+static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       if (is_erratum_383()) {
-               /*
-                * Erratum 383 triggered. Guest state is corrupt so kill the
-                * guest.
-                */
-               pr_err("KVM: Guest triggered AMD Erratum 383\n");
-
-               set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
-
-               return;
-       }
-
        /*
         * On an #MC intercept the MCE handler is not called automatically in
         * the host. So do it by hand here.
@@ -1350,11 +1261,6 @@ static void svm_handle_mce(struct vcpu_svm *svm)
                "int $0x12\n");
        /* not sure if we ever come back to this point */
 
-       return;
-}
-
-static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
-{
        return 1;
 }
 
@@ -2112,7 +2018,7 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
        ++svm->vcpu.stat.nmi_window_exits;
-       svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+       svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
        svm->vcpu.arch.hflags |= HF_IRET_MASK;
        return 1;
 }
@@ -2507,7 +2413,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
 
        svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
        vcpu->arch.hflags |= HF_NMI_MASK;
-       svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+       svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
        ++vcpu->stat.nmi_injections;
 }
 
@@ -2698,8 +2604,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        sync_lapic_to_cr8(vcpu);
 
        save_host_msrs(vcpu);
-       savesegment(fs, fs_selector);
-       savesegment(gs, gs_selector);
+       fs_selector = kvm_read_fs();
+       gs_selector = kvm_read_gs();
        ldt_selector = kvm_read_ldt();
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
        /* required for live migration with NPT */
@@ -2786,15 +2692,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
        vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-       load_host_msrs(vcpu);
-       loadsegment(fs, fs_selector);
-#ifdef CONFIG_X86_64
-       load_gs_index(gs_selector);
-       wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
-#else
-       loadsegment(gs, gs_selector);
-#endif
+       kvm_load_fs(fs_selector);
+       kvm_load_gs(gs_selector);
        kvm_load_ldt(ldt_selector);
+       load_host_msrs(vcpu);
 
        reload_tss(vcpu);
 
@@ -2810,14 +2711,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
                vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
        }
-
-       /*
-        * We need to handle MC intercepts here before the vcpu has a chance to
-        * change the physical cpu
-        */
-       if (unlikely(svm->vmcb->control.exit_code ==
-                    SVM_EXIT_EXCP_BASE + MC_VECTOR))
-               svm_handle_mce(svm);
 }
 
 #undef R
index d9c4fb6f0ff37545f95c29c520c3c9dde17c06c5..ed53b42caba119bb7b488efdf79170b44ba922e4 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/ftrace_event.h>
-#include <linux/tboot.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
 
@@ -62,8 +61,6 @@ module_param_named(unrestricted_guest,
 static int __read_mostly emulate_invalid_guest_state = 0;
 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 
-#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
-
 struct vmcs {
        u32 revision_id;
        u32 abort;
@@ -95,7 +92,7 @@ struct vcpu_vmx {
        } host_state;
        struct {
                int vm86_active;
-               ulong save_rflags;
+               u8 save_iopl;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
@@ -130,7 +127,6 @@ static u64 construct_eptp(unsigned long root_hpa);
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
-static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
 
 static unsigned long *vmx_io_bitmap_a;
 static unsigned long *vmx_io_bitmap_b;
@@ -629,7 +625,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
         */
        vmx->host_state.ldt_sel = kvm_read_ldt();
        vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
-       savesegment(fs, vmx->host_state.fs_sel);
+       vmx->host_state.fs_sel = kvm_read_fs();
        if (!(vmx->host_state.fs_sel & 7)) {
                vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
                vmx->host_state.fs_reload_needed = 0;
@@ -637,7 +633,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
                vmcs_write16(HOST_FS_SELECTOR, 0);
                vmx->host_state.fs_reload_needed = 1;
        }
-       savesegment(gs, vmx->host_state.gs_sel);
+       vmx->host_state.gs_sel = kvm_read_gs();
        if (!(vmx->host_state.gs_sel & 7))
                vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
        else {
@@ -654,7 +650,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-       save_msrs(vmx->host_msrs + vmx->msr_offset_kernel_gs_base, 1);
+       if (is_long_mode(&vmx->vcpu))
+               save_msrs(vmx->host_msrs +
+                         vmx->msr_offset_kernel_gs_base, 1);
+
 #endif
        load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
        load_transition_efer(vmx);
@@ -662,36 +661,32 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 {
+       unsigned long flags;
+
        if (!vmx->host_state.loaded)
                return;
 
        ++vmx->vcpu.stat.host_state_reload;
        vmx->host_state.loaded = 0;
        if (vmx->host_state.fs_reload_needed)
-               loadsegment(fs, vmx->host_state.fs_sel);
-#ifdef CONFIG_X86_64
-       if (is_long_mode(&vmx->vcpu))
-               save_msrs(vmx->guest_msrs + vmx->msr_offset_kernel_gs_base, 1);
-#endif
+               kvm_load_fs(vmx->host_state.fs_sel);
        if (vmx->host_state.gs_ldt_reload_needed) {
                kvm_load_ldt(vmx->host_state.ldt_sel);
+               /*
+                * If we have to reload gs, we must take care to
+                * preserve our gs base.
+                */
+               local_irq_save(flags);
+               kvm_load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
-               load_gs_index(vmx->host_state.gs_sel);
-#else
-               loadsegment(gs, vmx->host_state.gs_sel);
+               wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
+               local_irq_restore(flags);
        }
        reload_tss();
-#ifdef CONFIG_X86_64
-       save_msrs(vmx->guest_msrs, vmx->msr_offset_kernel_gs_base);
-       save_msrs(vmx->guest_msrs + vmx->msr_offset_kernel_gs_base + 1,
-                 vmx->save_nmsrs - vmx->msr_offset_kernel_gs_base - 1);
-#else
        save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-#endif
        load_msrs(vmx->host_msrs, vmx->save_nmsrs);
        reload_host_efer(vmx);
-       load_gdt(&__get_cpu_var(host_gdt));
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -788,23 +783,18 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-       unsigned long rflags, save_rflags;
+       unsigned long rflags;
 
        rflags = vmcs_readl(GUEST_RFLAGS);
-       if (to_vmx(vcpu)->rmode.vm86_active) {
-               rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
-               save_rflags = to_vmx(vcpu)->rmode.save_rflags;
-               rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
-       }
+       if (to_vmx(vcpu)->rmode.vm86_active)
+               rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
        return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-       if (to_vmx(vcpu)->rmode.vm86_active) {
-               to_vmx(vcpu)->rmode.save_rflags = rflags;
+       if (to_vmx(vcpu)->rmode.vm86_active)
                rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
-       }
        vmcs_writel(GUEST_RFLAGS, rflags);
 }
 
@@ -1143,16 +1133,9 @@ static __init int vmx_disabled_by_bios(void)
        u64 msr;
 
        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
-       if (msr & FEATURE_CONTROL_LOCKED) {
-               if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
-                       && tboot_enabled())
-                       return 1;
-               if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
-                       && !tboot_enabled())
-                       return 1;
-       }
-
-       return 0;
+       return (msr & (FEATURE_CONTROL_LOCKED |
+                      FEATURE_CONTROL_VMXON_ENABLED))
+           == FEATURE_CONTROL_LOCKED;
        /* locked but not enabled */
 }
 
@@ -1160,26 +1143,22 @@ static void hardware_enable(void *garbage)
 {
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
-       u64 old, test_bits;
+       u64 old;
 
        INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
        rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
-
-       test_bits = FEATURE_CONTROL_LOCKED;
-       test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
-       if (tboot_enabled())
-               test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
-
-       if ((old & test_bits) != test_bits) {
+       if ((old & (FEATURE_CONTROL_LOCKED |
+                   FEATURE_CONTROL_VMXON_ENABLED))
+           != (FEATURE_CONTROL_LOCKED |
+               FEATURE_CONTROL_VMXON_ENABLED))
                /* enable and lock */
-               wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
-       }
+               wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
+                      FEATURE_CONTROL_LOCKED |
+                      FEATURE_CONTROL_VMXON_ENABLED);
        write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
        asm volatile (ASM_VMX_VMXON_RAX
                      : : "a"(&phys_addr), "m"(phys_addr)
                      : "memory", "cc");
-
-       store_gdt(&__get_cpu_var(host_gdt));
 }
 
 static void vmclear_local_vcpus(void)
@@ -1452,8 +1431,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
-       flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+       flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+       flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
        vmcs_writel(GUEST_RFLAGS, flags);
 
        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1522,7 +1501,8 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       vmx->rmode.save_rflags = flags;
+       vmx->rmode.save_iopl
+               = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
 
        flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 
@@ -2322,10 +2302,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                                ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
                if (vmx->vpid == 0)
                        exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
-               if (!enable_ept) {
+               if (!enable_ept)
                        exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
-                       enable_unrestricted_guest = 0;
-               }
                if (!enable_unrestricted_guest)
                        exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
@@ -2342,8 +2320,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
        vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-       vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
-       vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
+       vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
+       vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
        rdmsrl(MSR_FS_BASE, a);
@@ -2532,7 +2510,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
        if (vmx->vpid != 0)
                vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
-       vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+       vmx->vcpu.arch.cr0 = 0x60000010;
        vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
        vmx_set_cr4(&vmx->vcpu, 0);
        vmx_set_efer(&vmx->vcpu, 0);
@@ -2696,12 +2674,6 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
                kvm_queue_exception(vcpu, vec);
                return 1;
        case BP_VECTOR:
-               /*
-                * Update instruction length as we may reinject the exception
-                * from user space while in guest debugging mode.
-                */
-               to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
-                       vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        return 0;
                /* fall through */
@@ -2818,13 +2790,6 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
                /* fall through */
        case BP_VECTOR:
-               /*
-                * Update instruction length as we may reinject #BP from
-                * user space while in guest debugging mode. Reading it for
-                * #DB as well causes no harm, it is not used in that case.
-                */
-               vmx->vcpu.arch.event_exit_inst_len =
-                       vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
                kvm_run->debug.arch.exception = ex_no;
index b2c02a2b00381ca22e47df3180d03980fcf810c7..e78d9907e0eef7719b4b52be2bb37217e2ab8f45 100644 (file)
@@ -297,16 +297,21 @@ out:
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        if (cr0 & CR0_RESERVED_BITS) {
+               printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
+                      cr0, vcpu->arch.cr0);
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
+               printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
+               printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
+                      "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -317,11 +322,15 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                        int cs_db, cs_l;
 
                        if (!is_pae(vcpu)) {
+                               printk(KERN_DEBUG "set_cr0: #GP, start paging "
+                                      "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
+                               printk(KERN_DEBUG "set_cr0: #GP, start paging "
+                                      "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
 
@@ -329,6 +338,8 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                } else
 #endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
+                       printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
+                              "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -345,7 +356,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-       kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0eul) | (msw & 0x0f));
+       kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
@@ -355,23 +366,28 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
        if (cr4 & CR4_RESERVED_BITS) {
+               printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
+                       printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
+                              "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
+               printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (cr4 & X86_CR4_VMXE) {
+               printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -392,16 +408,21 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
+                       printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
+                               printk(KERN_DEBUG
+                                      "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
+                               printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
+                                      "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
@@ -433,6 +454,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
        if (cr8 & CR8_RESERVED_BITS) {
+               printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -483,42 +505,53 @@ static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
 };
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-       if (efer & efer_reserved_bits)
-               return 1;
+       if (efer & efer_reserved_bits) {
+               printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
+                      efer);
+               kvm_inject_gp(vcpu, 0);
+               return;
+       }
 
        if (is_paging(vcpu)
-           && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME))
-               return 1;
+           && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+               printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
+               kvm_inject_gp(vcpu, 0);
+               return;
+       }
 
        if (efer & EFER_FFXSR) {
                struct kvm_cpuid_entry2 *feat;
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-               if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
-                       return 1;
+               if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
+                       printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
+                       kvm_inject_gp(vcpu, 0);
+                       return;
+               }
        }
 
        if (efer & EFER_SVME) {
                struct kvm_cpuid_entry2 *feat;
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-               if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
-                       return 1;
+               if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
+                       printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
+                       kvm_inject_gp(vcpu, 0);
+                       return;
+               }
        }
 
+       kvm_x86_ops->set_efer(vcpu, efer);
+
        efer &= ~EFER_LMA;
        efer |= vcpu->arch.shadow_efer & EFER_LMA;
 
-       kvm_x86_ops->set_efer(vcpu, efer);
-
        vcpu->arch.shadow_efer = efer;
 
        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
-
-       return 0;
 }
 
 void kvm_enable_efer_bits(u64 mask)
@@ -548,22 +581,14 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 
 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 {
-       int version;
-       int r;
+       static int version;
        struct pvclock_wall_clock wc;
        struct timespec boot;
 
        if (!wall_clock)
                return;
 
-       r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
-       if (r)
-               return;
-
-       if (version & 1)
-               ++version;  /* first time write, random junk */
-
-       ++version;
+       version++;
 
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
 
@@ -801,13 +826,9 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
-                       /* only 0 or all 1s can be written to IA32_MCi_CTL
-                        * some Linux kernels though clear bit 10 in bank 4 to
-                        * workaround a BIOS/GART TBL issue on AMD K8s, ignore
-                        * this to avoid an uncatched #GP in the guest
-                        */
+                       /* only 0 or all 1s can be written to IA32_MCi_CTL */
                        if ((offset & 0x3) == 0 &&
-                           data != 0 && (data | (1 << 10)) != ~(u64)0)
+                           data != 0 && data != ~(u64)0)
                                return -1;
                        vcpu->arch.mce_banks[offset] = data;
                        break;
@@ -821,7 +842,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        switch (msr) {
        case MSR_EFER:
-               return set_efer(vcpu, data);
+               set_efer(vcpu, data);
+               break;
        case MSR_K7_HWCR:
                data &= ~(u64)0x40;     /* ignore flush filter disable */
                if (data != 0) {
@@ -1220,8 +1242,8 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_MEMORY_SLOTS;
                break;
-       case KVM_CAP_PV_MMU:    /* obsolete */
-               r = 0;
+       case KVM_CAP_PV_MMU:
+               r = !tdp_enabled;
                break;
        case KVM_CAP_IOMMU:
                r = iommu_found();
@@ -1413,7 +1435,6 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 {
        int r;
 
-       vcpu_load(vcpu);
        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
@@ -1425,7 +1446,6 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 
 out:
        cpuid->nent = vcpu->arch.cpuid_nent;
-       vcpu_put(vcpu);
        return r;
 }
 
@@ -1485,7 +1505,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
        const u32 kvm_supported_word6_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
                0 /* SKINIT */ | 0 /* WDT */;
 
        /* all calls to cpuid_count() should be made on the same cpu */
@@ -1675,7 +1695,6 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
        int r;
        unsigned bank_num = mcg_cap & 0xff, bank;
 
-       vcpu_load(vcpu);
        r = -EINVAL;
        if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
                goto out;
@@ -1690,7 +1709,6 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
        for (bank = 0; bank < bank_num; bank++)
                vcpu->arch.mce_banks[bank*4] = ~(u64)0;
 out:
-       vcpu_put(vcpu);
        return r;
 }
 
@@ -1893,9 +1911,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&mce, argp, sizeof mce))
                        goto out;
-               vcpu_load(vcpu);
                r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
-               vcpu_put(vcpu);
                break;
        }
        default:
@@ -2102,7 +2118,6 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
                sizeof(ps->channels));
        ps->flags = kvm->arch.vpit->pit_state.flags;
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
-       memset(&ps->reserved, 0, sizeof(ps->reserved));
        return r;
 }
 
@@ -2141,7 +2156,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
 {
        int r;
-       unsigned long n;
+       int n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;
 
@@ -2157,7 +2172,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
                spin_unlock(&kvm->mmu_lock);
                memslot = &kvm->memslots[log->slot];
-               n = kvm_dirty_bitmap_bytes(memslot);
+               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
@@ -2440,7 +2455,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
                now_ns = timespec_to_ns(&now);
                user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
                user_ns.flags = 0;
-               memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
                r = -EFAULT;
                if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -2491,41 +2505,14 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
        return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
 }
 
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
-{
-       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
-}
-
- gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
-{
-       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-       access |= PFERR_FETCH_MASK;
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
-}
-
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
-{
-       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-       access |= PFERR_WRITE_MASK;
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
-}
-
-/* uses this to access any guest's mapped memory without checking CPL */
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
-{
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
-}
-
-static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
-                                     struct kvm_vcpu *vcpu, u32 access,
-                                     u32 *error)
+static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
+                              struct kvm_vcpu *vcpu)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
        while (bytes) {
-               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
+               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@ -2548,37 +2535,14 @@ out:
        return r;
 }
 
-/* used for instruction fetching */
-static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                               struct kvm_vcpu *vcpu, u32 *error)
-{
-       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-                                         access | PFERR_FETCH_MASK, error);
-}
-
-static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                              struct kvm_vcpu *vcpu, u32 *error)
-{
-       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
-                                         error);
-}
-
-static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
-                              struct kvm_vcpu *vcpu, u32 *error)
-{
-       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
-}
-
 static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                               struct kvm_vcpu *vcpu, u32 *error)
+                               struct kvm_vcpu *vcpu)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
        while (bytes) {
-               gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
+               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@ -2608,7 +2572,6 @@ static int emulator_read_emulated(unsigned long addr,
                                  struct kvm_vcpu *vcpu)
 {
        gpa_t                 gpa;
-       u32 error_code;
 
        if (vcpu->mmio_read_completed) {
                memcpy(val, vcpu->mmio_data, bytes);
@@ -2618,20 +2581,17 @@ static int emulator_read_emulated(unsigned long addr,
                return X86EMUL_CONTINUE;
        }
 
-       gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
-
-       if (gpa == UNMAPPED_GVA) {
-               kvm_inject_page_fault(vcpu, addr, error_code);
-               return X86EMUL_PROPAGATE_FAULT;
-       }
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
        /* For APIC access vmexit */
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                goto mmio;
 
-       if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
+       if (kvm_read_guest_virt(addr, val, bytes, vcpu)
                                == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
+       if (gpa == UNMAPPED_GVA)
+               return X86EMUL_PROPAGATE_FAULT;
 
 mmio:
        /*
@@ -2670,12 +2630,11 @@ static int emulator_write_emulated_onepage(unsigned long addr,
                                           struct kvm_vcpu *vcpu)
 {
        gpa_t                 gpa;
-       u32 error_code;
 
-       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
        if (gpa == UNMAPPED_GVA) {
-               kvm_inject_page_fault(vcpu, addr, error_code);
+               kvm_inject_page_fault(vcpu, addr, 2);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
@@ -2739,7 +2698,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                char *kaddr;
                u64 val;
 
-               gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
+               gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
                if (gpa == UNMAPPED_GVA ||
                   (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -2784,9 +2743,6 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
        struct kvm_vcpu *vcpu = ctxt->vcpu;
 
-       if (!kvm_x86_ops->get_dr)
-               return X86EMUL_UNHANDLEABLE;
-
        switch (dr) {
        case 0 ... 3:
                *dest = kvm_x86_ops->get_dr(vcpu, dr);
@@ -2802,9 +2758,6 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
        int exception;
 
-       if (!kvm_x86_ops->set_dr)
-               return X86EMUL_UNHANDLEABLE;
-
        kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
        if (exception) {
                /* FIXME: better handling */
@@ -2824,7 +2777,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 
        rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
 
-       kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
+       kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
 
        printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
               context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
@@ -2832,8 +2785,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
 
 static struct x86_emulate_ops emulate_ops = {
-       .read_std            = kvm_read_guest_virt_system,
-       .fetch               = kvm_fetch_guest_virt,
+       .read_std            = kvm_read_guest_virt,
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
@@ -2876,9 +2828,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                vcpu->arch.emulate_ctxt.vcpu = vcpu;
                vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
                vcpu->arch.emulate_ctxt.mode =
-                       (!(vcpu->arch.cr0 & X86_CR0_PE)) ? X86EMUL_MODE_REAL :
                        (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
-                       ? X86EMUL_MODE_VM86 : cs_l
+                       ? X86EMUL_MODE_REAL : cs_l
                        ? X86EMUL_MODE_PROT64 : cs_db
                        ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 
@@ -2970,17 +2921,12 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
        gva_t q = vcpu->arch.pio.guest_gva;
        unsigned bytes;
        int ret;
-       u32 error_code;
 
        bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
        if (vcpu->arch.pio.in)
-               ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
+               ret = kvm_write_guest_virt(q, p, bytes, vcpu);
        else
-               ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
-
-       if (ret == X86EMUL_PROPAGATE_FAULT)
-               kvm_inject_page_fault(vcpu, q, error_code);
-
+               ret = kvm_read_guest_virt(q, p, bytes, vcpu);
        return ret;
 }
 
@@ -3001,7 +2947,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
                if (io->in) {
                        r = pio_copy_data(vcpu);
                        if (r)
-                               goto out;
+                               return r;
                }
 
                delta = 1;
@@ -3028,7 +2974,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
                        kvm_register_write(vcpu, VCPU_REGS_RSI, val);
                }
        }
-out:
+
        io->count -= io->cur_count;
        io->cur_count = 0;
 
@@ -3071,8 +3017,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 {
        unsigned long val;
 
-       trace_kvm_pio(!in, port, size, 1);
-
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3084,6 +3028,9 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->arch.pio.down = 0;
        vcpu->arch.pio.rep = 0;
 
+       trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+                     size, 1);
+
        val = kvm_register_read(vcpu, VCPU_REGS_RAX);
        memcpy(vcpu->arch.pio_data, &val, 4);
 
@@ -3102,8 +3049,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        unsigned now, in_page;
        int ret = 0;
 
-       trace_kvm_pio(!in, port, size, count);
-
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3115,6 +3060,9 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->arch.pio.down = down;
        vcpu->arch.pio.rep = rep;
 
+       trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+                     size, count);
+
        if (!count) {
                kvm_x86_ops->skip_emulated_instruction(vcpu);
                return 1;
@@ -3146,8 +3094,10 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        if (!vcpu->arch.pio.in) {
                /* string PIO write */
                ret = pio_copy_data(vcpu);
-               if (ret == X86EMUL_PROPAGATE_FAULT)
+               if (ret == X86EMUL_PROPAGATE_FAULT) {
+                       kvm_inject_gp(vcpu, 0);
                        return 1;
+               }
                if (ret == 0 && !pio_string_write(vcpu)) {
                        complete_pio(vcpu);
                        if (vcpu->arch.pio.count == 0)
@@ -4127,9 +4077,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
                return 1;
        }
-       return kvm_read_guest_virt_system(dtable.base + index*8,
-                                         seg_desc, sizeof(*seg_desc),
-                                         vcpu, NULL);
+       return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
 }
 
 /* allowed just for 8 bytes segments */
@@ -4143,23 +4091,15 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
        if (dtable.limit < index * 8 + 7)
                return 1;
-       return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
+       return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
 }
 
-static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
-                              struct desc_struct *seg_desc)
-{
-       u32 base_addr = get_desc_base(seg_desc);
-
-       return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
-}
-
-static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
+static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
                             struct desc_struct *seg_desc)
 {
        u32 base_addr = get_desc_base(seg_desc);
 
-       return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -4170,6 +4110,18 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
        return kvm_seg.selector;
 }
 
+static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
+                                               u16 selector,
+                                               struct kvm_segment *kvm_seg)
+{
+       struct desc_struct seg_desc;
+
+       if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
+               return 1;
+       seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
+       return 0;
+}
+
 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
 {
        struct kvm_segment segvar = {
@@ -4187,7 +4139,7 @@ static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int se
                .unusable = 0,
        };
        kvm_x86_ops->set_segment(vcpu, &segvar, seg);
-       return X86EMUL_CONTINUE;
+       return 0;
 }
 
 static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
@@ -4197,113 +4149,24 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
                (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM);
 }
 
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+                               int type_bits, int seg)
 {
        struct kvm_segment kvm_seg;
-       struct desc_struct seg_desc;
-       u8 dpl, rpl, cpl;
-       unsigned err_vec = GP_VECTOR;
-       u32 err_code = 0;
-       bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
-       int ret;
 
        if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
                return kvm_load_realmode_segment(vcpu, selector, seg);
+       if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
+               return 1;
+       kvm_seg.type |= type_bits;
 
+       if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
+           seg != VCPU_SREG_LDTR)
+               if (!kvm_seg.s)
+                       kvm_seg.unusable = 1;
 
-       /* NULL selector is not valid for TR, CS and SS */
-       if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
-           && null_selector)
-               goto exception;
-
-       /* TR should be in GDT only */
-       if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
-               goto exception;
-
-       ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
-       if (ret)
-               return ret;
-
-       seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
-
-       if (null_selector) { /* for NULL selector skip all following checks */
-               kvm_seg.unusable = 1;
-               goto load;
-       }
-
-       err_code = selector & 0xfffc;
-       err_vec = GP_VECTOR;
-
-       /* can't load system descriptor into segment selecor */
-       if (seg <= VCPU_SREG_GS && !kvm_seg.s)
-               goto exception;
-
-       if (!kvm_seg.present) {
-               err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
-               goto exception;
-       }
-
-       rpl = selector & 3;
-       dpl = kvm_seg.dpl;
-       cpl = kvm_x86_ops->get_cpl(vcpu);
-
-       switch (seg) {
-       case VCPU_SREG_SS:
-               /*
-                * segment is not a writable data segment or segment
-                * selector's RPL != CPL or segment selector's RPL != CPL
-                */
-               if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
-                       goto exception;
-               break;
-       case VCPU_SREG_CS:
-               if (!(kvm_seg.type & 8))
-                       goto exception;
-
-               if (kvm_seg.type & 4) {
-                       /* conforming */
-                       if (dpl > cpl)
-                               goto exception;
-               } else {
-                       /* nonconforming */
-                       if (rpl > cpl || dpl != cpl)
-                               goto exception;
-               }
-               /* CS(RPL) <- CPL */
-               selector = (selector & 0xfffc) | cpl;
-               break;
-       case VCPU_SREG_TR:
-               if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
-                       goto exception;
-               break;
-       case VCPU_SREG_LDTR:
-               if (kvm_seg.s || kvm_seg.type != 2)
-                       goto exception;
-               break;
-       default: /*  DS, ES, FS, or GS */
-               /*
-                * segment is not a data or readable code segment or
-                * ((segment is a data or nonconforming code segment)
-                * and (both RPL and CPL > DPL))
-                */
-               if ((kvm_seg.type & 0xa) == 0x8 ||
-                   (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
-                       goto exception;
-               break;
-       }
-
-       if (!kvm_seg.unusable && kvm_seg.s) {
-               /* mark segment as accessed */
-               kvm_seg.type |= 1;
-               seg_desc.type |= 1;
-               save_guest_segment_descriptor(vcpu, selector, &seg_desc);
-       }
-load:
        kvm_set_segment(vcpu, &kvm_seg, seg);
-       return X86EMUL_CONTINUE;
-exception:
-       kvm_queue_exception_e(vcpu, err_vec, err_code);
-       return X86EMUL_PROPAGATE_FAULT;
+       return 0;
 }
 
 static void save_state_to_tss32(struct kvm_vcpu *vcpu,
@@ -4329,14 +4192,6 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
        tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
 }
 
-static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
-{
-       struct kvm_segment kvm_seg;
-       kvm_get_segment(vcpu, &kvm_seg, seg);
-       kvm_seg.selector = sel;
-       kvm_set_segment(vcpu, &kvm_seg, seg);
-}
-
 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
                                  struct tss_segment_32 *tss)
 {
@@ -4354,41 +4209,25 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
        kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
        kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
 
-       /*
-        * SDM says that segment selectors are loaded before segment
-        * descriptors
-        */
-       kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
-       kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
-       kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
-       kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
-       kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
-       kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
-       kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
-
-       /*
-        * Now load segment descriptors. If fault happenes at this stage
-        * it is handled in a context of new task
-        */
-       if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
+       if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
+       if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
+       if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
+       if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
+       if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
                return 1;
        return 0;
 }
@@ -4429,33 +4268,19 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
        kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
        kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
 
-       /*
-        * SDM says that segment selectors are loaded before segment
-        * descriptors
-        */
-       kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
-       kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
-       kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
-       kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
-       kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
-
-       /*
-        * Now load segment descriptors. If fault happenes at this stage
-        * it is handled in a context of new task
-        */
-       if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
+       if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
+       if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
+       if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;
        return 0;
 }
@@ -4477,7 +4302,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
                            sizeof tss_segment_16))
                goto out;
 
-       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
                           &tss_segment_16, sizeof tss_segment_16))
                goto out;
 
@@ -4485,7 +4310,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
                tss_segment_16.prev_task_link = old_tss_sel;
 
                if (kvm_write_guest(vcpu->kvm,
-                                   get_tss_base_addr_write(vcpu, nseg_desc),
+                                   get_tss_base_addr(vcpu, nseg_desc),
                                    &tss_segment_16.prev_task_link,
                                    sizeof tss_segment_16.prev_task_link))
                        goto out;
@@ -4516,7 +4341,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                            sizeof tss_segment_32))
                goto out;
 
-       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
                           &tss_segment_32, sizeof tss_segment_32))
                goto out;
 
@@ -4524,7 +4349,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                tss_segment_32.prev_task_link = old_tss_sel;
 
                if (kvm_write_guest(vcpu->kvm,
-                                   get_tss_base_addr_write(vcpu, nseg_desc),
+                                   get_tss_base_addr(vcpu, nseg_desc),
                                    &tss_segment_32.prev_task_link,
                                    sizeof tss_segment_32.prev_task_link))
                        goto out;
@@ -4546,9 +4371,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        int ret = 0;
        u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
        u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
-       u32 desc_limit;
 
-       old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
+       old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
 
        /* FIXME: Handle errors. Failure to read either TSS or their
         * descriptors should generate a pagefault.
@@ -4569,10 +4393,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
                }
        }
 
-       desc_limit = get_desc_limit(&nseg_desc);
-       if (!nseg_desc.p ||
-           ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
-            desc_limit < 0x2b)) {
+       if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
                kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
                return 1;
        }
@@ -4760,7 +4581,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 
        vcpu_load(vcpu);
        down_read(&vcpu->kvm->slots_lock);
-       gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
        up_read(&vcpu->kvm->slots_lock);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
index ac2d426ea35fc867f680e27e6e389c1799bed26d..c2b6f395a022bfd881e37c2523925df7dd0731d8 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for x86 specific library files.
 #
 
-obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
+obj-$(CONFIG_SMP) += msr-smp.o
 
 lib-y := delay.o
 lib-y += thunk_$(BITS).o
@@ -26,5 +26,4 @@ else
         lib-y += thunk_64.o clear_page_64.o copy_page_64.o
         lib-y += memmove_64.o memset_64.o
         lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
-       lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
 endif
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
deleted file mode 100644 (file)
index a3c6688..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <linux/smp.h>
-#include <linux/module.h>
-
-static void __wbinvd(void *dummy)
-{
-       wbinvd();
-}
-
-void wbinvd_on_cpu(int cpu)
-{
-       smp_call_function_single(cpu, __wbinvd, NULL, 1);
-}
-EXPORT_SYMBOL(wbinvd_on_cpu);
-
-int wbinvd_on_all_cpus(void)
-{
-       return on_each_cpu(__wbinvd, NULL, 1);
-}
-EXPORT_SYMBOL(wbinvd_on_all_cpus);
diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
deleted file mode 100644 (file)
index 15acecf..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * x86-64 rwsem wrappers
- *
- * This interfaces the inline asm code to the slow-path
- * C routines. We need to save the call-clobbered regs
- * that the asm does not mark as clobbered, and move the
- * argument from %rax to %rdi.
- *
- * NOTE! We don't need to save %rax, because the functions
- * will always return the semaphore pointer in %rax (which
- * is also the input argument to these helpers)
- *
- * The following can clobber %rdx because the asm clobbers it:
- *   call_rwsem_down_write_failed
- *   call_rwsem_wake
- * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
- */
-
-#include <linux/linkage.h>
-#include <asm/rwlock.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-#include <asm/dwarf2.h>
-
-#define save_common_regs \
-       pushq %rdi; \
-       pushq %rsi; \
-       pushq %rcx; \
-       pushq %r8; \
-       pushq %r9; \
-       pushq %r10; \
-       pushq %r11
-
-#define restore_common_regs \
-       popq %r11; \
-       popq %r10; \
-       popq %r9; \
-       popq %r8; \
-       popq %rcx; \
-       popq %rsi; \
-       popq %rdi
-
-/* Fix up special calling conventions */
-ENTRY(call_rwsem_down_read_failed)
-       save_common_regs
-       pushq %rdx
-       movq %rax,%rdi
-       call rwsem_down_read_failed
-       popq %rdx
-       restore_common_regs
-       ret
-       ENDPROC(call_rwsem_down_read_failed)
-
-ENTRY(call_rwsem_down_write_failed)
-       save_common_regs
-       movq %rax,%rdi
-       call rwsem_down_write_failed
-       restore_common_regs
-       ret
-       ENDPROC(call_rwsem_down_write_failed)
-
-ENTRY(call_rwsem_wake)
-       decw %dx    /* do nothing if still outstanding active readers */
-       jnz 1f
-       save_common_regs
-       movq %rax,%rdi
-       call rwsem_wake
-       restore_common_regs
-1:     ret
-       ENDPROC(call_rwsem_wake)
-
-/* Fix up special calling conventions */
-ENTRY(call_rwsem_downgrade_wake)
-       save_common_regs
-       pushq %rdx
-       movq %rax,%rdi
-       call rwsem_downgrade_wake
-       popq %rdx
-       restore_common_regs
-       ret
-       ENDPROC(call_rwsem_downgrade_wake)
index 1739358b444d834b1adecdd02681e45216ae0eed..f4cee9028cf0b01e11951662b625f63371f627e6 100644 (file)
@@ -801,10 +801,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
        up_read(&mm->mmap_sem);
 
        /* Kernel mode? Handle exceptions or die: */
-       if (!(error_code & PF_USER)) {
+       if (!(error_code & PF_USER))
                no_context(regs, error_code, address);
-               return;
-       }
 
        /* User-space => ok to do another page fault: */
        if (is_prefetch(regs, error_code, address))
index 7d095ad545358b5868a8e4d9331d181f8a9844bc..5a4398a6006bcca5a0bce02e53f06601f399a09e 100644 (file)
@@ -49,7 +49,6 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
-#include <linux/bootmem.h>
 
 static unsigned long dma_reserve __initdata;
 
@@ -615,21 +614,6 @@ void __init paging_init(void)
  * Memory hotplug specific functions
  */
 #ifdef CONFIG_MEMORY_HOTPLUG
-/*
- * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
- * updating.
- */
-static void  update_end_of_memory_vars(u64 start, u64 size)
-{
-       unsigned long end_pfn = PFN_UP(start + size);
-
-       if (end_pfn > max_pfn) {
-               max_pfn = end_pfn;
-               max_low_pfn = end_pfn;
-               high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
-       }
-}
-
 /*
  * Memory is added always to NORMAL zone. This means you will never get
  * additional DMA/DMA32 memory.
@@ -649,9 +633,6 @@ int arch_add_memory(int nid, u64 start, u64 size)
        ret = __add_pages(nid, zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);
 
-       /* update max_pfn, max_low_pfn and high_memory */
-       update_end_of_memory_vars(start, size);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(arch_add_memory);
index c9ba9deafe83f30daaae9b0e998c3de715671d43..ed34f5e35999449a488be43ced0e580319b373f1 100644 (file)
@@ -6,14 +6,6 @@
 
 #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
 
-#ifdef CONFIG_HIGHPTE
-#define PGALLOC_USER_GFP __GFP_HIGHMEM
-#else
-#define PGALLOC_USER_GFP 0
-#endif
-
-gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
-
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
        return (pte_t *)__get_free_page(PGALLOC_GFP);
@@ -23,29 +15,16 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *pte;
 
-       pte = alloc_pages(__userpte_alloc_gfp, 0);
+#ifdef CONFIG_HIGHPTE
+       pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
+#else
+       pte = alloc_pages(PGALLOC_GFP, 0);
+#endif
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
 }
 
-static int __init setup_userpte(char *arg)
-{
-       if (!arg)
-               return -EINVAL;
-
-       /*
-        * "userpte=nohigh" disables allocation of user pagetables in
-        * high memory.
-        */
-       if (strcmp(arg, "nohigh") == 0)
-               __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
-       else
-               return -EINVAL;
-       return 0;
-}
-early_param("userpte", setup_userpte);
-
 void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
        pgtable_page_dtor(pte);
index ca6b33667f54c29d6f5f87126c632e9448d5bba2..3347f696edc77d4692f7b0b0fdb774677b573cfa 100644 (file)
@@ -95,10 +95,7 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
 static void nmi_cpu_start(void *dummy)
 {
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
-       if (!msrs->controls)
-               WARN_ON_ONCE(1);
-       else
-               model->start(msrs);
+       model->start(msrs);
 }
 
 static int nmi_start(void)
@@ -110,10 +107,7 @@ static int nmi_start(void)
 static void nmi_cpu_stop(void *dummy)
 {
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
-       if (!msrs->controls)
-               WARN_ON_ONCE(1);
-       else
-               model->stop(msrs);
+       model->stop(msrs);
 }
 
 static void nmi_stop(void)
@@ -165,7 +159,7 @@ static int nmi_setup_mux(void)
 
        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).multiplex =
-                       kzalloc(multiplex_size, GFP_KERNEL);
+                       kmalloc(multiplex_size, GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).multiplex)
                        return 0;
        }
@@ -185,6 +179,7 @@ static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
                if (counter_config[i].enabled) {
                        multiplex[i].saved = -(u64)counter_config[i].count;
                } else {
+                       multiplex[i].addr  = 0;
                        multiplex[i].saved = 0;
                }
        }
@@ -194,27 +189,25 @@ static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
 
 static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
 {
-       struct op_msr *counters = msrs->counters;
        struct op_msr *multiplex = msrs->multiplex;
        int i;
 
        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
-               if (counters[i].addr)
-                       rdmsrl(counters[i].addr, multiplex[virt].saved);
+               if (multiplex[virt].addr)
+                       rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
        }
 }
 
 static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
 {
-       struct op_msr *counters = msrs->counters;
        struct op_msr *multiplex = msrs->multiplex;
        int i;
 
        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
-               if (counters[i].addr)
-                       wrmsrl(counters[i].addr, multiplex[virt].saved);
+               if (multiplex[virt].addr)
+                       wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
        }
 }
 
@@ -310,11 +303,11 @@ static int allocate_msrs(void)
 
        int i;
        for_each_possible_cpu(i) {
-               per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
+               per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).counters)
                        return 0;
-               per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
+               per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls)
                        return 0;
@@ -518,13 +511,8 @@ static int __init init_sysfs(void)
        int error;
 
        error = sysdev_class_register(&oprofile_sysclass);
-       if (error)
-               return error;
-
-       error = sysdev_register(&device_oprofile);
-       if (error)
-               sysdev_class_unregister(&oprofile_sysclass);
-
+       if (!error)
+               error = sysdev_register(&device_oprofile);
        return error;
 }
 
@@ -535,10 +523,8 @@ static void exit_sysfs(void)
 }
 
 #else
-
-static inline int  init_sysfs(void) { return 0; }
-static inline void exit_sysfs(void) { }
-
+#define init_sysfs() do { } while (0)
+#define exit_sysfs() do { } while (0)
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -591,18 +577,6 @@ static int __init ppro_init(char **cpu_type)
        if (force_arch_perfmon && cpu_has_arch_perfmon)
                return 0;
 
-       /*
-        * Documentation on identifying Intel processors by CPU family
-        * and model can be found in the Intel Software Developer's
-        * Manuals (SDM):
-        *
-        *  http://www.intel.com/products/processor/manuals/
-        *
-        * As of May 2010 the documentation for this was in the:
-        * "Intel 64 and IA-32 Architectures Software Developer's
-        * Manual Volume 3B: System Programming Guide", "Table B-1
-        * CPUID Signature Values of DisplayFamily_DisplayModel".
-        */
        switch (cpu_model) {
        case 0 ... 2:
                *cpu_type = "i386/ppro";
@@ -621,19 +595,15 @@ static int __init ppro_init(char **cpu_type)
        case 14:
                *cpu_type = "i386/core";
                break;
-       case 0x0f:
-       case 0x16:
-       case 0x17:
-       case 0x1d:
+       case 15: case 23:
                *cpu_type = "i386/core_2";
                break;
-       case 0x1a:
-       case 0x1e:
        case 0x2e:
+       case 26:
                spec = &op_arch_perfmon_spec;
                *cpu_type = "i386/core_i7";
                break;
-       case 0x1c:
+       case 28:
                *cpu_type = "i386/atom";
                break;
        default:
@@ -655,8 +625,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
        char *cpu_type = NULL;
        int ret = 0;
 
-       using_nmi = 0;
-
        if (!cpu_has_apic)
                return -ENODEV;
 
@@ -739,10 +707,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
        mux_init(ops);
 
-       ret = init_sysfs();
-       if (ret)
-               return ret;
-
+       init_sysfs();
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
index 1ed963d2e9b6730c99517cbc1a44a0eaf3e47e8c..39686c29f03a2d52774dc47cc616544b47e1e426 100644 (file)
@@ -76,6 +76,19 @@ static struct op_ibs_config ibs_config;
 
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 
+static void op_mux_fill_in_addresses(struct op_msrs * const msrs)
+{
+       int i;
+
+       for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
+               int hw_counter = op_x86_virt_to_phys(i);
+               if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+                       msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
+               else
+                       msrs->multiplex[i].addr = 0;
+       }
+}
+
 static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
                               struct op_msrs const * const msrs)
 {
@@ -85,7 +98,7 @@ static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
        /* enable active counters */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
-               if (!reset_value[virt])
+               if (!counter_config[virt].enabled)
                        continue;
                rdmsrl(msrs->controls[i].addr, val);
                val &= model->reserved;
@@ -94,6 +107,10 @@ static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
        }
 }
 
+#else
+
+static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }
+
 #endif
 
 /* functions for op_amd_spec */
@@ -105,12 +122,18 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
        for (i = 0; i < NUM_COUNTERS; i++) {
                if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
                        msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
+               else
+                       msrs->counters[i].addr = 0;
        }
 
        for (i = 0; i < NUM_CONTROLS; i++) {
                if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
                        msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+               else
+                       msrs->controls[i].addr = 0;
        }
+
+       op_mux_fill_in_addresses(msrs);
 }
 
 static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
@@ -121,8 +144,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 
        /* setup reset_value */
        for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
-               if (counter_config[i].enabled
-                   && msrs->counters[op_x86_virt_to_phys(i)].addr)
+               if (counter_config[i].enabled)
                        reset_value[i] = counter_config[i].count;
                else
                        reset_value[i] = 0;
@@ -147,7 +169,9 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
        /* enable active counters */
        for (i = 0; i < NUM_COUNTERS; ++i) {
                int virt = op_x86_phys_to_virt(i);
-               if (!reset_value[virt])
+               if (!counter_config[virt].enabled)
+                       continue;
+               if (!msrs->counters[i].addr)
                        continue;
 
                /* setup counter registers */
@@ -381,6 +405,16 @@ static int init_ibs_nmi(void)
                return 1;
        }
 
+#ifdef CONFIG_NUMA
+       /* Sanity check */
+       /* Works only for 64bit with proper numa implementation. */
+       if (nodes != num_possible_nodes()) {
+               printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
+                       "found: %d, expected %d",
+                       nodes, num_possible_nodes());
+               return 1;
+       }
+#endif
        return 0;
 }
 
index e6a160a4684a4a9d96914acc161122d6c7f8c208..ac6b354becdfe9f5859aa7930cc6b2aedb9f17e2 100644 (file)
@@ -394,6 +394,12 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
        setup_num_counters();
        stag = get_stagger();
 
+       /* initialize some registers */
+       for (i = 0; i < num_counters; ++i)
+               msrs->counters[i].addr = 0;
+       for (i = 0; i < num_controls; ++i)
+               msrs->controls[i].addr = 0;
+
        /* the counter & cccr registers we pay attention to */
        for (i = 0; i < num_counters; ++i) {
                addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
index 2873c0087836e647ecc68bf9a785ce02e295f344..8eb05878554cf382a0cf482947a8ebf7a93fdeeb 100644 (file)
@@ -37,11 +37,15 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
        for (i = 0; i < num_counters; i++) {
                if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
                        msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+               else
+                       msrs->counters[i].addr = 0;
        }
 
        for (i = 0; i < num_counters; i++) {
                if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
                        msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+               else
+                       msrs->controls[i].addr = 0;
        }
 }
 
@@ -53,7 +57,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
        int i;
 
        if (!reset_value) {
-               reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
+               reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
                                        GFP_ATOMIC);
                if (!reset_value)
                        return;
index b02f6d8ac922f171b5b31be1923aa9e358e73bca..0696d506c4ade99b0d43232128a77cea99d65ec8 100644 (file)
@@ -590,8 +590,6 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
        case PCI_DEVICE_ID_INTEL_ICH10_1:
        case PCI_DEVICE_ID_INTEL_ICH10_2:
        case PCI_DEVICE_ID_INTEL_ICH10_3:
-       case PCI_DEVICE_ID_INTEL_CPT_LPC1:
-       case PCI_DEVICE_ID_INTEL_CPT_LPC2:
                r->name = "PIIX/ICH";
                r->get = pirq_piix_get;
                r->set = pirq_piix_set;
index fa0f651c573eb596eb4397d3fb95f55d5637b8f9..8aa85f17667e5034cfd2fae3eecd217c878d0529 100644 (file)
@@ -104,15 +104,12 @@ static void __save_processor_state(struct saved_context *ctxt)
        ctxt->cr4 = read_cr4();
        ctxt->cr8 = read_cr8();
 #endif
-       ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
-                                              &ctxt->misc_enable);
 }
 
 /* Needed by apm.c */
 void save_processor_state(void)
 {
        __save_processor_state(&saved_context);
-       save_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
@@ -179,8 +176,6 @@ static void fix_processor_context(void)
  */
 static void __restore_processor_state(struct saved_context *ctxt)
 {
-       if (ctxt->misc_enable_saved)
-               wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
        /*
         * control registers
         */
@@ -254,7 +249,6 @@ static void __restore_processor_state(struct saved_context *ctxt)
 void restore_processor_state(void)
 {
        __restore_processor_state(&saved_context);
-       restore_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(restore_processor_state);
index ad47daeafa4eace0e505c51aae54d43317d62233..b641388d8286acfcf73bc6f7ca850609270739b3 100644 (file)
@@ -27,17 +27,10 @@ ENTRY(swsusp_arch_suspend)
        ret
 
 ENTRY(restore_image)
-       movl    mmu_cr4_features, %ecx
        movl    resume_pg_dir, %eax
        subl    $__PAGE_OFFSET, %eax
        movl    %eax, %cr3
 
-       jecxz   1f      # cr4 Pentium and higher, skip if zero
-       andl    $~(X86_CR4_PGE), %ecx
-       movl    %ecx, %cr4;  # turn off PGE
-       movl    %cr3, %eax;  # flush TLB
-       movl    %eax, %cr3
-1:
        movl    restore_pblist, %edx
        .p2align 4,,7
 
@@ -61,8 +54,16 @@ done:
        movl    $swapper_pg_dir, %eax
        subl    $__PAGE_OFFSET, %eax
        movl    %eax, %cr3
+       /* Flush TLB, including "global" things (vmalloc) */
        movl    mmu_cr4_features, %ecx
        jecxz   1f      # cr4 Pentium and higher, skip if zero
+       movl    %ecx, %edx
+       andl    $~(X86_CR4_PGE), %edx
+       movl    %edx, %cr4;  # turn off PGE
+1:
+       movl    %cr3, %eax;  # flush TLB
+       movl    %eax, %cr3
+       jecxz   1f      # cr4 Pentium and higher, skip if zero
        movl    %ecx, %cr4;  # turn PGE back on
 1:
 
index 0087b0098903a6c231d5184a78f07981329d6efe..79f97383cde3be99da33f092c7604876be896dab 100644 (file)
@@ -48,7 +48,6 @@
 #include <asm/traps.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
-#include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
@@ -924,7 +923,7 @@ static const struct pv_init_ops xen_init_ops __initdata = {
 };
 
 static const struct pv_time_ops xen_time_ops __initdata = {
-       .sched_clock = xen_clocksource_read,
+       .sched_clock = xen_sched_clock,
 };
 
 static const struct pv_cpu_ops xen_cpu_ops __initdata = {
@@ -997,6 +996,10 @@ static void xen_reboot(int reason)
 {
        struct sched_shutdown r = { .reason = reason };
 
+#ifdef CONFIG_SMP
+       smp_send_stop();
+#endif
+
        if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                BUG();
 }
@@ -1089,12 +1092,6 @@ asmlinkage void __init xen_start_kernel(void)
 
        __supported_pte_mask |= _PAGE_IOMAP;
 
-       /*
-        * Prevent page tables from being allocated in highmem, even
-        * if CONFIG_HIGHPTE is enabled.
-        */
-       __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
-
 #ifdef CONFIG_X86_64
        /* Work out if we support NX */
        check_efer();
index 350a3deedf25496dc12571dd950c6aca0f740ff6..bf4cd6bfe959f1037431668c71a263050098f35a 100644 (file)
@@ -1432,15 +1432,14 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 {
        pgprot_t prot = PAGE_KERNEL;
 
-       /*
-        * We disable highmem allocations for page tables so we should never
-        * see any calls to kmap_atomic_pte on a highmem page.
-        */
-       BUG_ON(PageHighMem(page));
-
        if (PagePinned(page))
                prot = PAGE_KERNEL_RO;
 
+       if (0 && PageHighMem(page))
+               printk("mapping highpte %lx type %d prot %s\n",
+                      page_to_pfn(page), type,
+                      (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
+
        return kmap_atomic_prot(page, type, prot);
 }
 #endif
index ca5f56e9aaa0af9754364bc33d3009aef0a913fc..360f8d8c19cd2e933fb010f7f7cfa990fc042823 100644 (file)
@@ -396,9 +396,9 @@ static void stop_self(void *v)
        BUG();
 }
 
-static void xen_stop_other_cpus(int wait)
+static void xen_smp_send_stop(void)
 {
-       smp_call_function(stop_self, NULL, wait);
+       smp_call_function(stop_self, NULL, 0);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -466,7 +466,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,
 
-       .stop_other_cpus = xen_stop_other_cpus,
+       .smp_send_stop = xen_smp_send_stop,
        .smp_send_reschedule = xen_smp_send_reschedule,
 
        .send_call_func_ipi = xen_smp_send_call_function_ipi,
index a9c6611080347bd4368beff2ab582d07347fa256..987267f79bf5154648cb729cedf29e712fa58ab3 100644 (file)
@@ -60,6 +60,6 @@ static void xen_vcpu_notify_restore(void *data)
 
 void xen_arch_resume(void)
 {
-       on_each_cpu(xen_vcpu_notify_restore,
-                   (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
+       smp_call_function(xen_vcpu_notify_restore,
+                              (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
 }
index 8e04980d4697279ef461a6412053f48b48b429ce..9d1f853120d859cfc814e0f2eaa2e01b3f1575e8 100644 (file)
@@ -154,6 +154,45 @@ static void do_stolen_accounting(void)
        account_idle_ticks(ticks);
 }
 
+/*
+ * Xen sched_clock implementation.  Returns the number of unstolen
+ * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
+ * states.
+ */
+unsigned long long xen_sched_clock(void)
+{
+       struct vcpu_runstate_info state;
+       cycle_t now;
+       u64 ret;
+       s64 offset;
+
+       /*
+        * Ideally sched_clock should be called on a per-cpu basis
+        * anyway, so preempt should already be disabled, but that's
+        * not current practice at the moment.
+        */
+       preempt_disable();
+
+       now = xen_clocksource_read();
+
+       get_runstate_snapshot(&state);
+
+       WARN_ON(state.state != RUNSTATE_running);
+
+       offset = now - state.state_entry_time;
+       if (offset < 0)
+               offset = 0;
+
+       ret = state.time[RUNSTATE_blocked] +
+               state.time[RUNSTATE_running] +
+               offset;
+
+       preempt_enable();
+
+       return ret;
+}
+
+
 /* Get the TSC speed from Xen */
 unsigned long xen_tsc_khz(void)
 {
index ed8cd3cbd4993de9619f1e6db12af3c26b166c9b..f04c9891142fa7a5090d966ac4779b0af419ebca 100644 (file)
@@ -29,6 +29,5 @@
 # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
 #endif
 
-#define ARCH_KMALLOC_MINALIGN  L1_CACHE_BYTES
 
 #endif /* _XTENSA_CACHE_H */
index 30a7e51589305d9504b6689c5f2587d5234ce03f..9083cf0180cc8a296d2529a4ef9d9943ea5d05c1 100644 (file)
@@ -205,8 +205,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        unaligned = 1;
                        break;
                }
-               if (!iov[i].iov_len)
-                       return -EINVAL;
        }
 
        if (unaligned || (q->dma_pad_mask & len) || map_data)
index 7c7b8c1c190f75faadb5318ded7cfa1febc71a8d..d5aa8865c6441d80e9b5792c2ac7bd63a52debd2 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/blkdev.h>
 #include <linux/bootmem.h>     /* for max_pfn/max_low_pfn */
 #include <linux/gcd.h>
-#include <linux/lcm.h>
 
 #include "blk.h"
 
@@ -352,7 +351,7 @@ EXPORT_SYMBOL(blk_queue_logical_block_size);
  *   hardware can operate on without reverting to read-modify-write
  *   operations.
  */
-void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
+void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
 {
        q->limits.physical_block_size = size;
 
@@ -491,31 +490,18 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
- * @t: the stacking driver limits (top device)
- * @b:  the underlying queue limits (bottom, component device)
+ * @t: the stacking driver limits (top)
+ * @b:  the underlying queue limits (bottom)
  * @offset:  offset to beginning of data within component device
  *
  * Description:
- *    This function is used by stacking drivers like MD and DM to ensure
- *    that all component devices have compatible block sizes and
- *    alignments.  The stacking driver must provide a queue_limits
- *    struct (top) and then iteratively call the stacking function for
- *    all component (bottom) devices.  The stacking function will
- *    attempt to combine the values and ensure proper alignment.
- *
- *    Returns 0 if the top and bottom queue_limits are compatible.  The
- *    top device's block sizes and alignment offsets may be adjusted to
- *    ensure alignment with the bottom device. If no compatible sizes
- *    and alignments exist, -1 is returned and the resulting top
- *    queue_limits will have the misaligned flag set to indicate that
- *    the alignment_offset is undefined.
+ *    Merges two queue_limit structs.  Returns 0 if alignment didn't
+ *    change.  Returns -1 if adding the bottom device caused
+ *    misalignment.
  */
 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t offset)
 {
-       sector_t alignment;
-       unsigned int top, bottom, ret = 0;
-
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
@@ -532,26 +518,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);
 
-       t->misaligned |= b->misaligned;
-
-       alignment = queue_limit_alignment_offset(b, offset);
-
-       /* Bottom device has different alignment.  Check that it is
-        * compatible with the current top alignment.
-        */
-       if (t->alignment_offset != alignment) {
-
-               top = max(t->physical_block_size, t->io_min)
-                       + t->alignment_offset;
-               bottom = max(b->physical_block_size, b->io_min) + alignment;
-
-               /* Verify that top and bottom intervals line up */
-               if (max(top, bottom) & (min(top, bottom) - 1)) {
-                       t->misaligned = 1;
-                       ret = -1;
-               }
-       }
-
        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);
 
@@ -559,46 +525,37 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                     b->physical_block_size);
 
        t->io_min = max(t->io_min, b->io_min);
-       t->io_opt = lcm(t->io_opt, b->io_opt);
-
        t->no_cluster |= b->no_cluster;
 
-       /* Physical block size a multiple of the logical block size? */
-       if (t->physical_block_size & (t->logical_block_size - 1)) {
-               t->physical_block_size = t->logical_block_size;
-               t->misaligned = 1;
-               ret = -1;
-       }
-
-       /* Minimum I/O a multiple of the physical block size? */
-       if (t->io_min & (t->physical_block_size - 1)) {
-               t->io_min = t->physical_block_size;
+       /* Bottom device offset aligned? */
+       if (offset &&
+           (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
                t->misaligned = 1;
-               ret = -1;
+               return -1;
        }
 
-       /* Optimal I/O a multiple of the physical block size? */
-       if (t->io_opt & (t->physical_block_size - 1)) {
-               t->io_opt = 0;
-               t->misaligned = 1;
-               ret = -1;
-       }
-
-       /* Find lowest common alignment_offset */
-       t->alignment_offset = lcm(t->alignment_offset, alignment)
-               & (max(t->physical_block_size, t->io_min) - 1);
+       /* If top has no alignment offset, inherit from bottom */
+       if (!t->alignment_offset)
+               t->alignment_offset =
+                       b->alignment_offset & (b->physical_block_size - 1);
 
-       /* Verify that new alignment_offset is on a logical block boundary */
+       /* Top device aligned on logical block boundary? */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
-               ret = -1;
+               return -1;
        }
 
-       /* Discard */
-       t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
-                                             b->max_discard_sectors);
+       /* Find lcm() of optimal I/O size */
+       if (t->io_opt && b->io_opt)
+               t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
+       else if (b->io_opt)
+               t->io_opt = b->io_opt;
 
-       return ret;
+       /* Verify that optimal I/O size is a multiple of io_min */
+       if (t->io_min && t->io_opt % t->io_min)
+               return -1;
+
+       return 0;
 }
 EXPORT_SYMBOL(blk_stack_limits);
 
index 4f0c06c7a3388062f5aa58a8ffbe56de5d7c1f1e..1ba7e0aca8781b55ce14fa54491da8f81505b747 100644 (file)
@@ -109,7 +109,6 @@ void blk_rq_timed_out_timer(unsigned long data)
        struct request_queue *q = (struct request_queue *) data;
        unsigned long flags, next = 0;
        struct request *rq, *tmp;
-       int next_set = 0;
 
        spin_lock_irqsave(q->queue_lock, flags);
 
@@ -123,13 +122,16 @@ void blk_rq_timed_out_timer(unsigned long data)
                        if (blk_mark_rq_complete(rq))
                                continue;
                        blk_rq_timed_out(rq);
-               } else if (!next_set || time_after(next, rq->deadline)) {
+               } else if (!next || time_after(next, rq->deadline))
                        next = rq->deadline;
-                       next_set = 1;
-               }
        }
 
-       if (next_set)
+       /*
+        * next can never be 0 here with the list non-empty, since we always
+        * bump ->deadline to 1 so we can detect if the timer was ever added
+        * or not. See comment in blk_add_timer()
+        */
+       if (next)
                mod_timer(&q->timeout, round_jiffies_up(next));
 
        spin_unlock_irqrestore(q->queue_lock, flags);
index 7154a7a7e9ca6240c30ccf4cf5d5213edd3d0c18..0676301f16d0a619bd76261ebb5560040ce1a4e9 100644 (file)
@@ -424,7 +424,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
        /*
         * fill in all the output members
         */
-       hdr->device_status = rq->errors & 0xff;
+       hdr->device_status = status_byte(rq->errors);
        hdr->transport_status = host_byte(rq->errors);
        hdr->driver_status = driver_byte(rq->errors);
        hdr->info = 0;
index 1d5a7805446b2dfd087df4538cacba86cea4d8ba..e5b10017a50b121a9288d6d0354bfe9ebb15e499 100644 (file)
@@ -319,47 +319,33 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
        if (hdr->iovec_count) {
                const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
                size_t iov_data_len;
-               struct sg_iovec *sg_iov;
-               struct iovec *iov;
-               int i;
+               struct sg_iovec *iov;
 
-               sg_iov = kmalloc(size, GFP_KERNEL);
-               if (!sg_iov) {
+               iov = kmalloc(size, GFP_KERNEL);
+               if (!iov) {
                        ret = -ENOMEM;
                        goto out;
                }
 
-               if (copy_from_user(sg_iov, hdr->dxferp, size)) {
-                       kfree(sg_iov);
+               if (copy_from_user(iov, hdr->dxferp, size)) {
+                       kfree(iov);
                        ret = -EFAULT;
                        goto out;
                }
 
-               /*
-                * Sum up the vecs, making sure they don't overflow
-                */
-               iov = (struct iovec *) sg_iov;
-               iov_data_len = 0;
-               for (i = 0; i < hdr->iovec_count; i++) {
-                       if (iov_data_len + iov[i].iov_len < iov_data_len) {
-                               kfree(sg_iov);
-                               ret = -EINVAL;
-                               goto out;
-                       }
-                       iov_data_len += iov[i].iov_len;
-               }
-
                /* SG_IO howto says that the shorter of the two wins */
+               iov_data_len = iov_length((struct iovec *)iov,
+                                         hdr->iovec_count);
                if (hdr->dxfer_len < iov_data_len) {
-                       hdr->iovec_count = iov_shorten(iov,
+                       hdr->iovec_count = iov_shorten((struct iovec *)iov,
                                                       hdr->iovec_count,
                                                       hdr->dxfer_len);
                        iov_data_len = hdr->dxfer_len;
                }
 
-               ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
+               ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
                                          iov_data_len, GFP_KERNEL);
-               kfree(sg_iov);
+               kfree(iov);
        } else if (hdr->dxfer_len)
                ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
                                      GFP_KERNEL);
index ce038d861eb9d2706e4474e725b5c6f4eadc4a00..943f2abac9b44fa6cab26fc763c1939702e7a6e8 100644 (file)
@@ -324,7 +324,6 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
                        struct page **blocks, struct async_submit_ctl *submit)
 {
-       void *scribble = submit->scribble;
        int non_zero_srcs, i;
 
        BUG_ON(faila == failb);
@@ -333,13 +332,11 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 
        pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-       /* if a dma resource is not available or a scribble buffer is not
-        * available punt to the synchronous path.  In the 'dma not
-        * available' case be sure to use the scribble buffer to
-        * preserve the content of 'blocks' as the caller intended.
+       /* we need to preserve the contents of 'blocks' for the async
+        * case, so punt to synchronous if a scribble buffer is not available
         */
-       if (!async_dma_find_channel(DMA_PQ) || !scribble) {
-               void **ptrs = scribble ? scribble : (void **) blocks;
+       if (!submit->scribble) {
+               void **ptrs = (void **) blocks;
 
                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
@@ -409,13 +406,11 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 
        pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-       /* if a dma resource is not available or a scribble buffer is not
-        * available punt to the synchronous path.  In the 'dma not
-        * available' case be sure to use the scribble buffer to
-        * preserve the content of 'blocks' as the caller intended.
+       /* we need to preserve the contents of 'blocks' for the async
+        * case, so punt to synchronous if a scribble buffer is not available
         */
-       if (!async_dma_find_channel(DMA_PQ) || !scribble) {
-               void **ptrs = scribble ? scribble : (void **) blocks;
+       if (!scribble) {
+               void **ptrs = (void **) blocks;
 
                async_tx_quiesce(&submit->depend_tx);
                for (i = 0; i < disks; i++)
index 0d54de91105070932993c02d1c07f606e792aaf9..4d6f49a5daeb4f75881090abd4c5f556bf5b33c3 100644 (file)
@@ -46,12 +46,6 @@ struct authenc_request_ctx {
        char tail[];
 };
 
-static void authenc_request_complete(struct aead_request *req, int err)
-{
-       if (err != -EINPROGRESS)
-               aead_request_complete(req, err);
-}
-
 static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
                                 unsigned int keylen)
 {
@@ -148,7 +142,7 @@ static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
                                 crypto_aead_authsize(authenc), 1);
 
 out:
-       authenc_request_complete(req, err);
+       aead_request_complete(req, err);
 }
 
 static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
@@ -214,7 +208,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
        err = crypto_ablkcipher_decrypt(abreq);
 
 out:
-       authenc_request_complete(req, err);
+       aead_request_complete(req, err);
 }
 
 static void authenc_verify_ahash_done(struct crypto_async_request *areq,
@@ -251,7 +245,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
        err = crypto_ablkcipher_decrypt(abreq);
 
 out:
-       authenc_request_complete(req, err);
+       aead_request_complete(req, err);
 }
 
 static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
@@ -385,7 +379,7 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
                err = crypto_authenc_genicv(areq, iv, 0);
        }
 
-       authenc_request_complete(areq, err);
+       aead_request_complete(areq, err);
 }
 
 static int crypto_authenc_encrypt(struct aead_request *req)
@@ -424,7 +418,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
                err = crypto_authenc_genicv(areq, greq->giv, 0);
        }
 
-       authenc_request_complete(areq, err);
+       aead_request_complete(areq, err);
 }
 
 static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
index 2a4106d37946e02f302842671f9b3e55c9b99d8d..6d5b746637be226da5d9aac78af39a9851188464 100644 (file)
@@ -1477,54 +1477,9 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
        return err;
 }
 
-static int alg_test_null(const struct alg_test_desc *desc,
-                            const char *driver, u32 type, u32 mask)
-{
-       return 0;
-}
-
 /* Please keep this list sorted by algorithm name. */
 static const struct alg_test_desc alg_test_descs[] = {
        {
-               .alg = "__driver-cbc-aes-aesni",
-               .test = alg_test_null,
-               .suite = {
-                       .cipher = {
-                               .enc = {
-                                       .vecs = NULL,
-                                       .count = 0
-                               },
-                               .dec = {
-                                       .vecs = NULL,
-                                       .count = 0
-                               }
-                       }
-               }
-       }, {
-               .alg = "__driver-ecb-aes-aesni",
-               .test = alg_test_null,
-               .suite = {
-                       .cipher = {
-                               .enc = {
-                                       .vecs = NULL,
-                                       .count = 0
-                               },
-                               .dec = {
-                                       .vecs = NULL,
-                                       .count = 0
-                               }
-                       }
-               }
-       }, {
-               .alg = "__ghash-pclmulqdqni",
-               .test = alg_test_null,
-               .suite = {
-                       .hash = {
-                               .vecs = NULL,
-                               .count = 0
-                       }
-               }
-       }, {
                .alg = "ansi_cprng",
                .test = alg_test_cprng,
                .fips_allowed = 1,
@@ -1667,30 +1622,6 @@ static const struct alg_test_desc alg_test_descs[] = {
                                .count = CRC32C_TEST_VECTORS
                        }
                }
-       }, {
-               .alg = "cryptd(__driver-ecb-aes-aesni)",
-               .test = alg_test_null,
-               .suite = {
-                       .cipher = {
-                               .enc = {
-                                       .vecs = NULL,
-                                       .count = 0
-                               },
-                               .dec = {
-                                       .vecs = NULL,
-                                       .count = 0
-                               }
-                       }
-               }
-       }, {
-               .alg = "cryptd(__ghash-pclmulqdqni)",
-               .test = alg_test_null,
-               .suite = {
-                       .hash = {
-                               .vecs = NULL,
-                               .count = 0
-                       }
-               }
        }, {
                .alg = "ctr(aes)",
                .test = alg_test_skcipher,
@@ -1737,21 +1668,6 @@ static const struct alg_test_desc alg_test_descs[] = {
                                }
                        }
                }
-       }, {
-               .alg = "ecb(__aes-aesni)",
-               .test = alg_test_null,
-               .suite = {
-                       .cipher = {
-                               .enc = {
-                                       .vecs = NULL,
-                                       .count = 0
-                               },
-                               .dec = {
-                                       .vecs = NULL,
-                                       .count = 0
-                               }
-                       }
-               }
        }, {
                .alg = "ecb(aes)",
                .test = alg_test_skcipher,
index 91a927688ae63797fe35739f7fa0102aa34e273f..6a4eff18c29f0248dd423175a9691a095626680c 100755 (executable)
@@ -17,7 +17,6 @@ obj-$(CONFIG_SFI)             += sfi/
 obj-$(CONFIG_PNP)              += pnp/
 obj-$(CONFIG_ARM_AMBA)         += amba/
 
-obj-$(CONFIG_VIRTIO)           += virtio/
 obj-$(CONFIG_XEN)              += xen/
 
 # regulators early, since some subsystems rely on them to initialize
@@ -109,6 +108,7 @@ obj-$(CONFIG_HID)           += hid/
 obj-$(CONFIG_PPC_PS3)          += ps3/
 obj-$(CONFIG_OF)               += of/
 obj-$(CONFIG_SSB)              += ssb/
+obj-$(CONFIG_VIRTIO)           += virtio/
 obj-$(CONFIG_VLYNQ)            += vlynq/
 obj-$(CONFIG_STAGING)          += staging/
 obj-y                          += platform/
index d46e2565ae47244b0f64d5584ea5baef6ea64397..81e64f478679be04dafdb1a1786e34e72906c310 100644 (file)
@@ -846,7 +846,6 @@ struct acpi_bit_register_info {
        ACPI_BITMASK_POWER_BUTTON_STATUS   | \
        ACPI_BITMASK_SLEEP_BUTTON_STATUS   | \
        ACPI_BITMASK_RT_CLOCK_STATUS       | \
-       ACPI_BITMASK_PCIEXP_WAKE_DISABLE   | \
        ACPI_BITMASK_WAKE_STATUS)
 
 #define ACPI_BITMASK_TIMER_ENABLE               0x0001
index 83b62521d8d300fff08b105177b1f0bd2be5dd3e..52fec07064f0be7581aa72160d829b521959bfbe 100644 (file)
@@ -468,23 +468,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
 
                acpi_ut_add_reference(obj_desc->field.region_obj);
 
-               /* allow full data read from EC address space */
-               if (obj_desc->field.region_obj->region.space_id ==
-                       ACPI_ADR_SPACE_EC) {
-                       if (obj_desc->common_field.bit_length > 8) {
-                               unsigned width =
-                                       ACPI_ROUND_BITS_UP_TO_BYTES(
-                                       obj_desc->common_field.bit_length);
-                               // access_bit_width is u8, don't overflow it
-                               if (width > 8)
-                                       width = 8;
-                               obj_desc->common_field.access_byte_width =
-                                                       width;
-                               obj_desc->common_field.access_bit_width =
-                                                       8 * width;
-                       }
-               }
-
                ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
                                  "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
                                  obj_desc->field.start_field_bit_offset,
index 5624d7bc24af367361bcc05e70cee3d3aa1286d9..23e5a0519af552d27ddcb9f8c14aa38062b434ea 100644 (file)
@@ -185,12 +185,6 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
        acpi_osi_setup("!Windows 2006");
        return 0;
 }
-static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
-{
-       printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
-       acpi_osi_setup("!Windows 2009");
-       return 0;
-}
 
 static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
        {
@@ -217,38 +211,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
                },
        },
-       {
-       .callback = dmi_disable_osi_vista,
-       .ident = "Toshiba Satellite L355",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_vista,
-       .ident = "Toshiba Satellite L355",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_win7,
-       .ident = "ASUS K50IJ",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_vista,
-       .ident = "Toshiba P305D",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
-               },
-       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
index 45d2aa93258ed3881a33c13fa58d83fb49821e33..f1670e0ef9bb7fb7836ee30af5769bb9430f6f50 100644 (file)
@@ -588,12 +588,12 @@ static u32 acpi_ec_gpe_handler(void *data)
 
 static acpi_status
 acpi_ec_space_handler(u32 function, acpi_physical_address address,
-                     u32 bits, acpi_integer *value64,
+                     u32 bits, acpi_integer *value,
                      void *handler_context, void *region_context)
 {
        struct acpi_ec *ec = handler_context;
-       int result = 0, i, bytes = bits / 8;
-       u8 *value = (u8 *)value64;
+       int result = 0, i;
+       u8 temp = 0;
 
        if ((address > 0xFF) || !value || !handler_context)
                return AE_BAD_PARAMETER;
@@ -601,15 +601,32 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
        if (function != ACPI_READ && function != ACPI_WRITE)
                return AE_BAD_PARAMETER;
 
-       if (EC_FLAGS_MSI || bits > 8)
+       if (bits != 8 && acpi_strict)
+               return AE_BAD_PARAMETER;
+
+       if (EC_FLAGS_MSI)
                acpi_ec_burst_enable(ec);
 
-       for (i = 0; i < bytes; ++i, ++address, ++value)
-               result = (function == ACPI_READ) ?
-                       acpi_ec_read(ec, address, value) :
-                       acpi_ec_write(ec, address, *value);
+       if (function == ACPI_READ) {
+               result = acpi_ec_read(ec, address, &temp);
+               *value = temp;
+       } else {
+               temp = 0xff & (*value);
+               result = acpi_ec_write(ec, address, temp);
+       }
+
+       for (i = 8; unlikely(bits - i > 0); i += 8) {
+               ++address;
+               if (function == ACPI_READ) {
+                       result = acpi_ec_read(ec, address, &temp);
+                       (*value) |= ((acpi_integer)temp) << i;
+               } else {
+                       temp = 0xff & ((*value) >> i);
+                       result = acpi_ec_write(ec, address, temp);
+               }
+       }
 
-       if (EC_FLAGS_MSI || bits > 8)
+       if (EC_FLAGS_MSI)
                acpi_ec_burst_disable(ec);
 
        switch (result) {
index c2160626131833ea46f88957065e3b93b6c0621e..2ef7030a0c28b7d428de422d6cda082a36ec8723 100644 (file)
@@ -34,7 +34,7 @@
 #define ACPI_POWER_METER_NAME          "power_meter"
 ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
 #define ACPI_POWER_METER_DEVICE_NAME   "Power Meter"
-#define ACPI_POWER_METER_CLASS         "pwr_meter_resource"
+#define ACPI_POWER_METER_CLASS         "power_meter_resource"
 
 #define NUM_SENSORS                    17
 
index 71024740d62b178c784fd46c37b9ffca8c76f1a0..ec742a4e563505704dbc3e92137aecf324d04aaa 100644 (file)
@@ -133,6 +133,12 @@ static int set_no_mwait(const struct dmi_system_id *id)
 }
 
 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
+       {
+       set_no_mwait, "IFL91 board", {
+       DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
+       DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
+       DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
+       DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
index a6ad608c96a2f050ccaa5adbe19921d48621fc8e..d9f78f6cbda31f1002f2aa5390605f29c7c57bd3 100644 (file)
@@ -888,14 +888,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
                return(acpi_idle_enter_c1(dev, state));
 
        local_irq_disable();
-       if (cx->entry_method != ACPI_CSTATE_FFH) {
-               current_thread_info()->status &= ~TS_POLLING;
-               /*
-                * TS_POLLING-cleared state must be visible before we test
-                * NEED_RESCHED:
-                */
-               smp_mb();
-       }
+       current_thread_info()->status &= ~TS_POLLING;
+       /*
+        * TS_POLLING-cleared state must be visible before we test
+        * NEED_RESCHED:
+        */
+       smp_mb();
 
        if (unlikely(need_resched())) {
                current_thread_info()->status |= TS_POLLING;
@@ -962,7 +960,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        if (acpi_idle_suspend)
                return(acpi_idle_enter_c1(dev, state));
 
-       if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
+       if (acpi_idle_bm_check()) {
                if (dev->safe_state) {
                        dev->last_state = dev->safe_state;
                        return dev->safe_state->enter(dev, dev->safe_state);
@@ -975,14 +973,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        }
 
        local_irq_disable();
-       if (cx->entry_method != ACPI_CSTATE_FFH) {
-               current_thread_info()->status &= ~TS_POLLING;
-               /*
-                * TS_POLLING-cleared state must be visible before we test
-                * NEED_RESCHED:
-                */
-               smp_mb();
-       }
+       current_thread_info()->status &= ~TS_POLLING;
+       /*
+        * TS_POLLING-cleared state must be visible before we test
+        * NEED_RESCHED:
+        */
+       smp_mb();
 
        if (unlikely(need_resched())) {
                current_thread_info()->status |= TS_POLLING;
index 40d395efec1e9dafab9db46b499956b64f314d42..8ba0ed0b9ddbc912684d398e2eda14357920aed4 100644 (file)
@@ -356,11 +356,7 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
        if (result)
                goto update_bios;
 
-       /* We need to call _PPC once when cpufreq starts */
-       if (ignore_ppc != 1)
-               result = acpi_processor_get_platform_limit(pr);
-
-       return result;
+       return 0;
 
        /*
         * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
index 045809465347afeffce70a7e60075b7e30235cc9..5f2c379ab7bfba1903158f5c3f94cf37631a6c44 100644 (file)
@@ -80,7 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
 
 #ifdef CONFIG_ACPI_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-
 /*
  * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
  * user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -171,6 +170,18 @@ static void acpi_pm_end(void)
 #endif /* CONFIG_ACPI_SLEEP */
 
 #ifdef CONFIG_SUSPEND
+/*
+ * According to the ACPI specification the BIOS should make sure that ACPI is
+ * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states.  Still,
+ * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
+ * on such systems during resume.  Unfortunately that doesn't help in
+ * particularly pathological cases in which SCI_EN has to be set directly on
+ * resume, although the specification states very clearly that this flag is
+ * owned by the hardware.  The set_sci_en_on_resume variable will be set in such
+ * cases.
+ */
+static bool set_sci_en_on_resume;
+
 extern void do_suspend_lowlevel(void);
 
 static u32 acpi_suspend_states[] = {
@@ -237,8 +248,11 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
                break;
        }
 
-       /* This violates the spec but is required for bug compatibility. */
-       acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
+       /* If ACPI is not enabled by the BIOS, we need to enable it here. */
+       if (set_sci_en_on_resume)
+               acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
+       else
+               acpi_enable();
 
        /* Reprogram control registers and execute _BFS */
        acpi_leave_sleep_state_prep(acpi_state);
@@ -327,6 +341,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
+{
+       set_sci_en_on_resume = true;
+       return 0;
+}
+
 static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        {
        .callback = init_old_suspend_ordering,
@@ -345,6 +365,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
                },
        },
        {
+       .callback = init_set_sci_en_on_resume,
+       .ident = "Apple MacBook 1,1",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
+               },
+       },
+       {
+       .callback = init_set_sci_en_on_resume,
+       .ident = "Apple MacMini 1,1",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
+               },
+       },
+       {
        .callback = init_old_suspend_ordering,
        .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
        .matches = {
@@ -353,6 +389,62 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
                },
        },
        {
+       .callback = init_set_sci_en_on_resume,
+       .ident = "Toshiba Satellite L300",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
+               },
+       },
+       {
+       .callback = init_set_sci_en_on_resume,
+       .ident = "Hewlett-Packard HP G7000 Notebook PC",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
+               },
+       },
+       {
+       .callback = init_set_sci_en_on_resume,
+       .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
+               },
+       },
+       {
+       .callback = init_set_sci_en_on_resume,
+       .ident = "Hewlett-Packard Pavilion dv4",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
+               },
+       },
+       {
+       .callback = init_set_sci_en_on_resume,
+       .ident = "Hewlett-Packard Pavilion dv7",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
+               },
+       },
+       {
+       .callback = init_set_sci_en_on_resume,
+       .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
+               },
+       },
+       {
+       .callback = init_set_sci_en_on_resume,
+       .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
+               },
+       },
+       {
        .callback = init_old_suspend_ordering,
        .ident = "Panasonic CF51-2L",
        .matches = {
index 8a0ed2800e6359a49f781b41d2afc44cd183225b..f336bca7c4503ec1d03d7741ee8185afae942381 100644 (file)
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
        unsigned long table_end;
        acpi_size tbl_size;
 
-       if (acpi_disabled && !acpi_ht)
+       if (acpi_disabled)
                return -ENODEV;
 
        if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
        struct acpi_table_header *table = NULL;
        acpi_size tbl_size;
 
-       if (acpi_disabled && !acpi_ht)
+       if (acpi_disabled)
                return -ENODEV;
 
        if (!handler)
index e7b960602b663806d01e888c4d41cdeff9c3e760..575593a8b4e66b2f6132f6541178c1d68498bbcc 100644 (file)
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
                                ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
                if (!strcmp("video", str))
                        acpi_video_support |=
-                               ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
+                               ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
        }
        return 1;
 }
index e3d9816b2a223cf465125bd89aef6c52c1b5fa9b..9b375028318a6806f220a8e35ff646adb038cae6 100644 (file)
@@ -570,12 +570,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
-       { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
-       { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
-       { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
-       { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
-       { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
-       { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -2837,14 +2831,6 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
                 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
                 * to the harddisk doesn't become online after
                 * resuming from STR.  Warn and fail suspend.
-                *
-                * http://bugzilla.kernel.org/show_bug.cgi?id=12276
-                *
-                * Use dates instead of versions to match as HP is
-                * apparently recycling both product and version
-                * strings.
-                *
-                * http://bugzilla.kernel.org/show_bug.cgi?id=15462
                 */
                {
                        .ident = "dv4",
@@ -2853,7 +2839,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
                                DMI_MATCH(DMI_PRODUCT_NAME,
                                          "HP Pavilion dv4 Notebook PC"),
                        },
-                       .driver_data = "20090105",      /* F.30 */
+                       .driver_data = "F.30", /* cutoff BIOS version */
                },
                {
                        .ident = "dv5",
@@ -2862,7 +2848,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
                                DMI_MATCH(DMI_PRODUCT_NAME,
                                          "HP Pavilion dv5 Notebook PC"),
                        },
-                       .driver_data = "20090506",      /* F.16 */
+                       .driver_data = "F.16", /* cutoff BIOS version */
                },
                {
                        .ident = "dv6",
@@ -2871,7 +2857,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
                                DMI_MATCH(DMI_PRODUCT_NAME,
                                          "HP Pavilion dv6 Notebook PC"),
                        },
-                       .driver_data = "20090423",      /* F.21 */
+                       .driver_data = "F.21",  /* cutoff BIOS version */
                },
                {
                        .ident = "HDX18",
@@ -2880,7 +2866,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
                                DMI_MATCH(DMI_PRODUCT_NAME,
                                          "HP HDX18 Notebook PC"),
                        },
-                       .driver_data = "20090430",      /* F.23 */
+                       .driver_data = "F.23",  /* cutoff BIOS version */
                },
                /*
                 * Acer eMachines G725 has the same problem.  BIOS
@@ -2888,8 +2874,6 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
                 * work.  Inbetween, there are V1.06, V2.06 and V3.03
                 * that we don't have much idea about.  For now,
                 * blacklist anything older than V3.04.
-                *
-                * http://bugzilla.kernel.org/show_bug.cgi?id=15104
                 */
                {
                        .ident = "G725",
@@ -2897,21 +2881,19 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
                                DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
                                DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
                        },
-                       .driver_data = "20091216",      /* V3.04 */
+                       .driver_data = "V3.04", /* cutoff BIOS version */
                },
                { }     /* terminate list */
        };
        const struct dmi_system_id *dmi = dmi_first_match(sysids);
-       int year, month, date;
-       char buf[9];
+       const char *ver;
 
        if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
                return false;
 
-       dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
-       snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+       ver = dmi_get_system_info(DMI_BIOS_VERSION);
 
-       return strcmp(buf, dmi->driver_data) < 0;
+       return !ver || strcmp(ver, dmi->driver_data) < 0;
 }
 
 static bool ahci_broken_online(struct pci_dev *pdev)
@@ -3037,16 +3019,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
                return -ENODEV;
 
-       /*
-        * For some reason, MCP89 on MacBook 7,1 doesn't work with
-        * ahci, use ata_generic instead.
-        */
-       if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
-           pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
-           pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
-           pdev->subsystem_device == 0xcb89)
-               return -ENODEV;
-
        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
@@ -3102,16 +3074,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        ahci_save_initial_config(pdev, hpriv);
 
        /* prepare host */
-       if (hpriv->cap & HOST_CAP_NCQ) {
-               pi.flags |= ATA_FLAG_NCQ;
-               /* Auto-activate optimization is supposed to be supported on
-                  all AHCI controllers indicating NCQ support, but it seems
-                  to be broken at least on some NVIDIA MCP79 chipsets.
-                  Until we get info on which NVIDIA chipsets don't have this
-                  issue, if any, disable AA on all NVIDIA AHCIs. */
-               if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
-                       pi.flags |= ATA_FLAG_FPDMA_AA;
-       }
+       if (hpriv->cap & HOST_CAP_NCQ)
+               pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
 
        if (hpriv->cap & HOST_CAP_PMP)
                pi.flags |= ATA_FLAG_PMP;
index 99e719619e7d4f504150cfa95823154e884faad6..ecfd22b4f1ce9743afb8d9c52c308a6cdfa556a4 100644 (file)
  *     A generic parallel ATA driver using libata
  */
 
-enum {
-       ATA_GEN_CLASS_MATCH             = (1 << 0),
-       ATA_GEN_FORCE_DMA               = (1 << 1),
-};
-
 /**
  *     generic_set_mode        -       mode setting
  *     @link: link to set up
@@ -51,17 +46,13 @@ enum {
 static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
 {
        struct ata_port *ap = link->ap;
-       const struct pci_device_id *id = ap->host->private_data;
        int dma_enabled = 0;
        struct ata_device *dev;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
-       if (id->driver_data & ATA_GEN_FORCE_DMA) {
-               dma_enabled = 0xff;
-       } else if (ap->ioaddr.bmdma_addr) {
-               /* Bits 5 and 6 indicate if DMA is active on master/slave */
+       /* Bits 5 and 6 indicate if DMA is active on master/slave */
+       if (ap->ioaddr.bmdma_addr)
                dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-       }
 
        if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
                dma_enabled = 0xFF;
@@ -135,7 +126,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
        const struct ata_port_info *ppi[] = { &info, NULL };
 
        /* Don't use the generic entry unless instructed to do so */
-       if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
+       if (id->driver_data == 1 && all_generic_ide == 0)
                return -ENODEV;
 
        /* Devices that need care */
@@ -164,7 +155,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
                        return rc;
                pcim_pin_device(dev);
        }
-       return ata_pci_sff_init_one(dev, ppi, &generic_sht, (void *)id);
+       return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL);
 }
 
 static struct pci_device_id ata_generic[] = {
@@ -176,21 +167,12 @@ static struct pci_device_id ata_generic[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HINT,   PCI_DEVICE_ID_HINT_VXPROII_IDE), },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA,    PCI_DEVICE_ID_VIA_82C561), },
        { PCI_DEVICE(PCI_VENDOR_ID_OPTI,   PCI_DEVICE_ID_OPTI_82C558), },
-       { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE),
-         .driver_data = ATA_GEN_FORCE_DMA },
-       /*
-        * For some reason, MCP89 on MacBook 7,1 doesn't work with
-        * ahci, use ata_generic instead.
-        */
-       { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA,
-         PCI_VENDOR_ID_APPLE, 0xcb89,
-         .driver_data = ATA_GEN_FORCE_DMA },
+       { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), },
        { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
        { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
        { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2),  },
        /* Must come last. If you add entries adjust this table appropriately */
-       { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
-         .driver_data = ATA_GEN_CLASS_MATCH },
+       { PCI_ANY_ID,           PCI_ANY_ID,                        PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
        { 0, },
 };
 
index c33591d3a5999a4e624578132b53eea00d820a98..0c6155f51173484a66bf80b5e5efe6f1f1fc464c 100644 (file)
@@ -157,7 +157,6 @@ struct piix_map_db {
 struct piix_host_priv {
        const int *map;
        u32 saved_iocfg;
-       spinlock_t sidpr_lock;  /* FIXME: remove once locking in EH is fixed */
        void __iomem *sidpr;
 };
 
@@ -292,14 +291,6 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (PCH) */
        { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
-       /* SATA Controller IDE (CPT) */
-       { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
-       /* SATA Controller IDE (CPT) */
-       { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
-       /* SATA Controller IDE (CPT) */
-       { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
-       /* SATA Controller IDE (CPT) */
-       { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        { }     /* terminate list */
 };
 
@@ -949,15 +940,12 @@ static int piix_sidpr_scr_read(struct ata_link *link,
                               unsigned int reg, u32 *val)
 {
        struct piix_host_priv *hpriv = link->ap->host->private_data;
-       unsigned long flags;
 
        if (reg >= ARRAY_SIZE(piix_sidx_map))
                return -EINVAL;
 
-       spin_lock_irqsave(&hpriv->sidpr_lock, flags);
        piix_sidpr_sel(link, reg);
        *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
-       spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
        return 0;
 }
 
@@ -965,15 +953,12 @@ static int piix_sidpr_scr_write(struct ata_link *link,
                                unsigned int reg, u32 val)
 {
        struct piix_host_priv *hpriv = link->ap->host->private_data;
-       unsigned long flags;
 
        if (reg >= ARRAY_SIZE(piix_sidx_map))
                return -EINVAL;
 
-       spin_lock_irqsave(&hpriv->sidpr_lock, flags);
        piix_sidpr_sel(link, reg);
        iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
-       spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
        return 0;
 }
 
@@ -1562,7 +1547,6 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
        hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv)
                return -ENOMEM;
-       spin_lock_init(&hpriv->sidpr_lock);
 
        /* Save IOCFG, this will be used for cable detection, quirk
         * detection and restoration on detach.  This is necessary
index 0963cd6a1425aa479231d028e6003bd30b3d86e2..91fed3c93d660a8e7d2934e21142c3d1a8dfa6a2 100644 (file)
@@ -159,10 +159,6 @@ int libata_allow_tpm = 0;
 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
 
-static int atapi_an;
-module_param(atapi_an, int, 0444);
-MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
-
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
 MODULE_LICENSE("GPL");
@@ -2574,8 +2570,7 @@ int ata_dev_configure(struct ata_device *dev)
                 * to enable ATAPI AN to discern between PHY status
                 * changed notifications and ATAPI ANs.
                 */
-               if (atapi_an &&
-                   (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+               if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
                    (!sata_pmp_attached(ap) ||
                     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
                        unsigned int err_mask;
@@ -4353,9 +4348,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
        { "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
 
-       /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
-       { "C300-CTFDDAC128MAG", "0001",         ATA_HORKAGE_NONCQ, },
-
        /* devices which puke on READ_NATIVE_MAX */
        { "HDS724040KLSA80",    "KFAOA20N",     ATA_HORKAGE_BROKEN_HPA, },
        { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
@@ -5504,7 +5496,6 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
  */
 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
 {
-       unsigned int ehi_flags = ATA_EHI_QUIET;
        int rc;
 
        /*
@@ -5513,18 +5504,7 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
         */
        ata_lpm_enable(host);
 
-       /*
-        * On some hardware, device fails to respond after spun down
-        * for suspend.  As the device won't be used before being
-        * resumed, we don't need to touch the device.  Ask EH to skip
-        * the usual stuff and proceed directly to suspend.
-        *
-        * http://thread.gmane.org/gmane.linux.ide/46764
-        */
-       if (mesg.event == PM_EVENT_SUSPEND)
-               ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
-
-       rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
+       rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
        if (rc == 0)
                host->dev->power.power_state = mesg;
        return rc;
index fa9bed06b397b3c845e75a3d1820df8e524a4a27..7d8d3c3b4c80b09dbf5f9d7742094cffd2f212c6 100644 (file)
@@ -870,8 +870,6 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
-       struct request_queue *q = qc->scsicmd->device->request_queue;
-       unsigned long flags;
 
        WARN_ON(!ap->ops->error_handler);
 
@@ -883,9 +881,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
-       spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(qc->scsicmd->request);
-       spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
@@ -1619,7 +1615,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        }
 
        /* okay, this error is ours */
-       memset(&tf, 0, sizeof(tf));
        rc = ata_eh_read_log_10h(dev, &tag, &tf);
        if (rc) {
                ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
@@ -3149,10 +3144,6 @@ static int ata_eh_skip_recovery(struct ata_link *link)
        if (link->flags & ATA_LFLAG_DISABLED)
                return 1;
 
-       /* skip if explicitly requested */
-       if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
-               return 1;
-
        /* thaw frozen port and recover failed devices */
        if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
                return 0;
index a158a6c925d07149fee6ca10f08e89ac65b0ed52..b4ee28dec5218b19db9682e729458e4d6b30d2bb 100644 (file)
@@ -2497,11 +2497,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
                 *
                 * If door lock fails, always clear sdev->locked to
                 * avoid this infinite loop.
-                *
-                * This may happen before SCSI scan is complete.  Make
-                * sure qc->dev->sdev isn't NULL before dereferencing.
                 */
-               if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
+               if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
                        qc->dev->sdev->locked = 0;
 
                qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
@@ -2828,7 +2825,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
         * write indication (used for PIO/DMA setup), result TF is
         * copied back and we don't whine too much about its failure.
         */
-       tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+       tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        if (scmd->sc_data_direction == DMA_TO_DEVICE)
                tf->flags |= ATA_TFLAG_WRITE;
 
index 776a89599448331bae591b3986a429608d7ad4e0..2ae15c3b22a7356ba89f08005b761d6e74f5b6f5 100644 (file)
@@ -893,7 +893,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
                                       do_write);
        }
 
-       if (!do_write && !PageSlab(page))
+       if (!do_write)
                flush_dcache_page(page);
 
        qc->curbytes += qc->sect_size;
index 9434114b2ca8cb6f42e40d4ed2091360cd2add9e..1432dc9d0ab819d899ac3a506606a6778b146f30 100644 (file)
@@ -453,9 +453,7 @@ static void ali_init_chipset(struct pci_dev *pdev)
                        /* Clear CD-ROM DMA write bit */
                        tmp &= 0x7F;
                /* Cable and UDMA */
-               if (pdev->revision >= 0xc2)
-                       tmp |= 0x01;
-               pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
+               pci_write_config_byte(pdev, 0x4B, tmp | 0x09);
                /*
                 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
                 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
index d9f2913f159b3a086b6d1ce6c3d4d59bd6292bab..d16e87e29189a64ae2e88aaec8b887f37d174183 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "pata_hpt3x2n"
-#define DRV_VERSION    "0.3.9"
+#define DRV_VERSION    "0.3.8"
 
 enum {
        HPT_PCI_FAST    =       (1 << 31),
@@ -547,16 +547,16 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
               pci_mhz);
        /* Set our private data up. We only need a few flags so we use
           it directly */
-       if (pci_mhz > 60)
+       if (pci_mhz > 60) {
                hpriv = (void *)(PCI66 | USE_DPLL);
-
-       /*
-        * On  HPT371N, if ATA clock is 66 MHz we must set bit 2 in
-        * the MISC. register to stretch the UltraDMA Tss timing.
-        * NOTE: This register is only writeable via I/O space.
-        */
-       if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
-               outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
+               /*
+                * On  HPT371N, if ATA clock is 66 MHz we must set bit 2 in
+                * the MISC. register to stretch the UltraDMA Tss timing.
+                * NOTE: This register is only writeable via I/O space.
+                */
+               if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
+                       outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
+       }
 
        /* Now kick off ATA set up */
        return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv);
index 29111205185a155727b7f4a3a8f996cb928df8b0..2f3c9bed63d99925a02d20b2e93e780340508877 100644 (file)
@@ -2,7 +2,7 @@
  * pata_pdc202xx_old.c         - Promise PDC202xx PATA for new ATA layer
  *                       (C) 2005 Red Hat Inc
  *                       Alan Cox <alan@lxorguk.ukuu.org.uk>
- *                       (C) 2007,2009,2010 Bartlomiej Zolnierkiewicz
+ *                       (C) 2007,2009 Bartlomiej Zolnierkiewicz
  *
  * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
  *
@@ -35,15 +35,6 @@ static int pdc2026x_cable_detect(struct ata_port *ap)
        return ATA_CBL_PATA80;
 }
 
-static void pdc202xx_exec_command(struct ata_port *ap,
-                                 const struct ata_taskfile *tf)
-{
-       DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
-       iowrite8(tf->command, ap->ioaddr.command_addr);
-       ndelay(400);
-}
-
 /**
  *     pdc202xx_configure_piomode      -       set chip PIO timing
  *     @ap: ATA interface
@@ -280,8 +271,6 @@ static struct ata_port_operations pdc2024x_port_ops = {
        .cable_detect           = ata_cable_40wire,
        .set_piomode            = pdc202xx_set_piomode,
        .set_dmamode            = pdc202xx_set_dmamode,
-
-       .sff_exec_command       = pdc202xx_exec_command,
 };
 
 static struct ata_port_operations pdc2026x_port_ops = {
@@ -295,8 +284,6 @@ static struct ata_port_operations pdc2026x_port_ops = {
        .dev_config             = pdc2026x_dev_config,
 
        .port_start             = pdc2026x_port_start,
-
-       .sff_exec_command       = pdc202xx_exec_command,
 };
 
 static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
index 1d73b8d236ed5401024e755477313685a04d9297..88984b803d6dab2c2137c8203ad1669f8719ceff 100644 (file)
@@ -661,7 +661,6 @@ static const struct pci_device_id via[] = {
        { PCI_VDEVICE(VIA, 0x3164), },
        { PCI_VDEVICE(VIA, 0x5324), },
        { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
-       { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
 
        { },
 };
index cf41126ff426d70c68f42e406e09db1cf5241021..6f5093b7c8c597ad63078d98763a222adf0d0a09 100644 (file)
@@ -1879,25 +1879,19 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
  *     LOCKING:
  *     Inherited from caller.
  */
-static void mv_bmdma_stop_ap(struct ata_port *ap)
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
 {
+       struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 cmd;
 
        /* clear start/stop bit */
        cmd = readl(port_mmio + BMDMA_CMD);
-       if (cmd & ATA_DMA_START) {
-               cmd &= ~ATA_DMA_START;
-               writelfl(cmd, port_mmio + BMDMA_CMD);
-
-               /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-               ata_sff_dma_pause(ap);
-       }
-}
+       cmd &= ~ATA_DMA_START;
+       writelfl(cmd, port_mmio + BMDMA_CMD);
 
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
-{
-       mv_bmdma_stop_ap(qc->ap);
+       /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+       ata_sff_dma_pause(ap);
 }
 
 /**
@@ -1921,21 +1915,8 @@ static u8 mv_bmdma_status(struct ata_port *ap)
        reg = readl(port_mmio + BMDMA_STATUS);
        if (reg & ATA_DMA_ACTIVE)
                status = ATA_DMA_ACTIVE;
-       else if (reg & ATA_DMA_ERR)
+       else
                status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
-       else {
-               /*
-                * Just because DMA_ACTIVE is 0 (DMA completed),
-                * this does _not_ mean the device is "done".
-                * So we should not yet be signalling ATA_DMA_INTR
-                * in some cases.  Eg. DSM/TRIM, and perhaps others.
-                */
-               mv_bmdma_stop_ap(ap);
-               if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
-                       status = 0;
-               else
-                       status = ATA_DMA_INTR;
-       }
        return status;
 }
 
@@ -1995,9 +1976,6 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 
        switch (tf->protocol) {
        case ATA_PROT_DMA:
-               if (tf->command == ATA_CMD_DSM)
-                       return;
-               /* fall-thru */
        case ATA_PROT_NCQ:
                break;  /* continue below */
        case ATA_PROT_PIO:
@@ -2097,8 +2075,6 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
        if ((tf->protocol != ATA_PROT_DMA) &&
            (tf->protocol != ATA_PROT_NCQ))
                return;
-       if (tf->command == ATA_CMD_DSM)
-               return;  /* use bmdma for this */
 
        /* Fill in Gen IIE command request block */
        if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2294,12 +2270,6 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
        switch (qc->tf.protocol) {
        case ATA_PROT_DMA:
-               if (qc->tf.command == ATA_CMD_DSM) {
-                       if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
-                               return AC_ERR_OTHER;
-                       break;  /* use bmdma for this */
-               }
-               /* fall thru */
        case ATA_PROT_NCQ:
                mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
                pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
index ae2297cd29d886e87d784193765e6cbc6c4abaee..1eb4e020eb5ce3be2ab78cf2f3760f40a41f3b20 100644 (file)
@@ -1673,6 +1673,7 @@ static void nv_mcp55_freeze(struct ata_port *ap)
        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
        mask &= ~(NV_INT_ALL_MCP55 << shift);
        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+       ata_sff_freeze(ap);
 }
 
 static void nv_mcp55_thaw(struct ata_port *ap)
@@ -1686,6 +1687,7 @@ static void nv_mcp55_thaw(struct ata_port *ap)
        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
        mask |= (NV_INT_MASK_MCP55 << shift);
        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+       ata_sff_thaw(ap);
 }
 
 static void nv_adma_error_handler(struct ata_port *ap)
@@ -2476,7 +2478,8 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        pci_set_master(pdev);
-       return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
+       return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
+                                IRQF_SHARED, ipriv->sht);
 }
 
 #ifdef CONFIG_PM
index e35596b97853bd624c95c2d9ff8f399def6fb533..02efd9a83d26a06df084d5fffeaf1860d7aa859f 100644 (file)
@@ -558,19 +558,6 @@ static void svia_configure(struct pci_dev *pdev)
                tmp8 |= NATIVE_MODE_ALL;
                pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
        }
-
-       /*
-        * vt6421 has problems talking to some drives.  The following
-        * is the magic fix from Joseph Chan <JosephChan@via.com.tw>.
-        * Please add proper documentation if possible.
-        *
-        * https://bugzilla.kernel.org/show_bug.cgi?id=15173
-        */
-       if (pdev->device == 0x3249) {
-               pci_read_config_byte(pdev, 0x52, &tmp8);
-               tmp8 |= 1 << 2;
-               pci_write_config_byte(pdev, 0x52, tmp8);
-       }
 }
 
 static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
index a73f10278171ed4a8c3a6adb96a62f15d30dc4b5..c5f5186d62a3160404a50b5819c440239b311870 100644 (file)
@@ -774,8 +774,7 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
        sk_for_each(s, node, head) {
                vcc = atm_sk(s);
                if (vcc->dev == dev && vcc->vci == vci &&
-                   vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&
-                   test_bit(ATM_VF_READY, &vcc->flags))
+                   vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE)
                        goto out;
        }
        vcc = NULL;
@@ -901,10 +900,6 @@ static void pclose(struct atm_vcc *vcc)
        clear_bit(ATM_VF_ADDR, &vcc->flags);
        clear_bit(ATM_VF_READY, &vcc->flags);
 
-       /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the
-          tasklet has finished processing any incoming packets (and, more to
-          the point, using the vcc pointer). */
-       tasklet_unlock_wait(&card->tlet);
        return;
 }
 
index f33d768164ac983cb68ee6c3ade5c80d947038da..109317948d5d0ef13d2e870a3394befb4dbe9184 100644 (file)
@@ -603,7 +603,6 @@ static struct kobject *get_device_parent(struct device *dev,
        int retval;
 
        if (dev->class) {
-               static DEFINE_MUTEX(gdp_mutex);
                struct kobject *kobj = NULL;
                struct kobject *parent_kobj;
                struct kobject *k;
@@ -620,8 +619,6 @@ static struct kobject *get_device_parent(struct device *dev,
                else
                        parent_kobj = &parent->kobj;
 
-               mutex_lock(&gdp_mutex);
-
                /* find our class-directory at the parent and reference it */
                spin_lock(&dev->class->p->class_dirs.list_lock);
                list_for_each_entry(k, &dev->class->p->class_dirs.list, entry)
@@ -630,26 +627,20 @@ static struct kobject *get_device_parent(struct device *dev,
                                break;
                        }
                spin_unlock(&dev->class->p->class_dirs.list_lock);
-               if (kobj) {
-                       mutex_unlock(&gdp_mutex);
+               if (kobj)
                        return kobj;
-               }
 
                /* or create a new class-directory at the parent device */
                k = kobject_create();
-               if (!k) {
-                       mutex_unlock(&gdp_mutex);
+               if (!k)
                        return NULL;
-               }
                k->kset = &dev->class->p->class_dirs;
                retval = kobject_add(k, parent_kobj, "%s", dev->class->name);
                if (retval < 0) {
-                       mutex_unlock(&gdp_mutex);
                        kobject_put(k);
                        return NULL;
                }
                /* do not emit an uevent for this simple "glue" directory */
-               mutex_unlock(&gdp_mutex);
                return k;
        }
 
index 1e2196fef1c75ecf0bcfeef1dab216856e923605..e62a4ccea54d0c4a3dc6dcd84eb107e557bcb54c 100644 (file)
@@ -149,7 +149,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf)
        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
-       cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
+       cpumask_complement(offline, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);
 
index 4d809667815e5d64ba72ef6b57146c8174d87170..33faaa22a19dae234e7bfe97b5d25f1eabb8e645 100644 (file)
@@ -295,19 +295,6 @@ int devtmpfs_delete_node(struct device *dev)
                if (dentry->d_inode) {
                        err = vfs_getattr(nd.path.mnt, dentry, &stat);
                        if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
-                               struct iattr newattrs;
-                               /*
-                                * before unlinking this node, reset permissions
-                                * of possible references like hardlinks
-                                */
-                               newattrs.ia_uid = 0;
-                               newattrs.ia_gid = 0;
-                               newattrs.ia_mode = stat.mode & ~0777;
-                               newattrs.ia_valid =
-                                       ATTR_UID|ATTR_GID|ATTR_MODE;
-                               mutex_lock(&dentry->d_inode->i_mutex);
-                               notify_change(dentry, &newattrs);
-                               mutex_unlock(&dentry->d_inode->i_mutex);
                                err = vfs_unlink(nd.path.dentry->d_inode,
                                                 dentry);
                                if (!err || err == -ENOENT)
index de2e5e25f5bea1aa4ea57d78b8c2d8d8c2307013..cb07001513f1e942448c510af6e841cf318cde3f 100644 (file)
@@ -125,17 +125,6 @@ static ssize_t firmware_loading_show(struct device *dev,
        return sprintf(buf, "%d\n", loading);
 }
 
-static void firmware_free_data(const struct firmware *fw)
-{
-       int i;
-       vunmap(fw->data);
-       if (fw->pages) {
-               for (i = 0; i < PFN_UP(fw->size); i++)
-                       __free_page(fw->pages[i]);
-               kfree(fw->pages);
-       }
-}
-
 /* Some architectures don't have PAGE_KERNEL_RO */
 #ifndef PAGE_KERNEL_RO
 #define PAGE_KERNEL_RO PAGE_KERNEL
@@ -168,21 +157,21 @@ static ssize_t firmware_loading_store(struct device *dev,
                        mutex_unlock(&fw_lock);
                        break;
                }
-               firmware_free_data(fw_priv->fw);
-               memset(fw_priv->fw, 0, sizeof(struct firmware));
-               /* If the pages are not owned by 'struct firmware' */
+               vfree(fw_priv->fw->data);
+               fw_priv->fw->data = NULL;
                for (i = 0; i < fw_priv->nr_pages; i++)
                        __free_page(fw_priv->pages[i]);
                kfree(fw_priv->pages);
                fw_priv->pages = NULL;
                fw_priv->page_array_size = 0;
                fw_priv->nr_pages = 0;
+               fw_priv->fw->size = 0;
                set_bit(FW_STATUS_LOADING, &fw_priv->status);
                mutex_unlock(&fw_lock);
                break;
        case 0:
                if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
-                       vunmap(fw_priv->fw->data);
+                       vfree(fw_priv->fw->data);
                        fw_priv->fw->data = vmap(fw_priv->pages,
                                                 fw_priv->nr_pages,
                                                 0, PAGE_KERNEL_RO);
@@ -190,10 +179,7 @@ static ssize_t firmware_loading_store(struct device *dev,
                                dev_err(dev, "%s: vmap() failed\n", __func__);
                                goto err;
                        }
-                       /* Pages are now owned by 'struct firmware' */
-                       fw_priv->fw->pages = fw_priv->pages;
-                       fw_priv->pages = NULL;
-
+                       /* Pages will be freed by vfree() */
                        fw_priv->page_array_size = 0;
                        fw_priv->nr_pages = 0;
                        complete(&fw_priv->completion);
@@ -586,7 +572,7 @@ release_firmware(const struct firmware *fw)
                        if (fw->data == builtin->data)
                                goto free_fw;
                }
-               firmware_free_data(fw);
+               vfree(fw->data);
        free_fw:
                kfree(fw);
        }
index 1c21a3f238689012e8cbcaca2d8a537472b2aef3..bd112c8c7bcde9b3efdbceadc6fc89d65f6c9ea0 100644 (file)
@@ -238,8 +238,6 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
                if (ret)
                        goto fail;
 
-               file_update_time(file);
-
                transfer_result = lo_do_transfer(lo, WRITE, page, offset,
                                bvec->bv_page, bv_offs, size, IV);
                copied = size;
index 0e9c5646c5981504e8278bf47b3d65c7e4e78903..1be76318f516aadf4108b92221d732dfcaaca1fe 100644 (file)
@@ -59,9 +59,6 @@ static struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
        { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
 
-       /* Apple iMac11,1 */
-       { USB_DEVICE(0x05ac, 0x8215) },
-
        /* AVM BlueFRITZ! USB v2.0 */
        { USB_DEVICE(0x057c, 0x3800) },
 
index ac2f0f23b6eb456e9e35dffa487cee208bcc5d6b..51ff3ef58ec75610b70afeb0cc4428d461fa69b5 100755 (executable)
@@ -265,16 +265,9 @@ static int hci_uart_tty_open(struct tty_struct *tty)
 
        BT_DBG("tty %p", tty);
 
-       /* FIXME: This btw is bogus, nothing requires the old ldisc to clear
-          the pointer */
        if (hu)
                return -EEXIST;
 
-       /* Error if the tty has no write op instead of leaving an exploitable
-          hole */
-       if (tty->ops->write == NULL)
-               return -EOPNOTSUPP;
-
        if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
                BT_ERR("Can't allocate control structure");
                return -ENFILE;
index 70d56b6836308f6943428f822c7c5cc0347195f1..ccb1fa89de2976e99bcb5d7bb94094f989e3b974 100644 (file)
@@ -57,7 +57,7 @@ config AGP_AMD
 
 config AGP_AMD64
        tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU
-       depends on AGP && X86 && K8_NB
+       depends on AGP && X86
        default y if GART_IOMMU
        help
          This option gives you AGP support for the GLX component of
index c496c8a1a885d07396547e27b1eeb0107e0cc95e..2fb2e6cc322aab1a36164de3fc99674559dea9ed 100644 (file)
@@ -499,10 +499,6 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
        u8 cap_ptr;
        int err;
 
-       /* The Highlander principle */
-       if (agp_bridges_found)
-               return -ENODEV;
-
        cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
        if (!cap_ptr)
                return -ENODEV;
@@ -566,8 +562,6 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
                           amd64_aperture_sizes[bridge->aperture_size_idx].size);
        agp_remove_bridge(bridge);
        agp_put_bridge(bridge);
-
-       agp_bridges_found--;
 }
 
 #ifdef CONFIG_PM
@@ -715,11 +709,6 @@ static struct pci_device_id agp_amd64_pci_table[] = {
 
 MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
 
-static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
-       { PCI_DEVICE_CLASS(0, 0) },
-       { }
-};
-
 static struct pci_driver agp_amd64_pci_driver = {
        .name           = "agpgart-amd64",
        .id_table       = agp_amd64_pci_table,
@@ -744,6 +733,7 @@ int __init agp_amd64_init(void)
                return err;
 
        if (agp_bridges_found == 0) {
+               struct pci_dev *dev;
                if (!agp_try_unsupported && !agp_try_unsupported_boot) {
                        printk(KERN_INFO PFX "No supported AGP bridge found.\n");
 #ifdef MODULE
@@ -759,10 +749,17 @@ int __init agp_amd64_init(void)
                        return -ENODEV;
 
                /* Look for any AGP bridge */
-               agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
-               err = driver_attach(&agp_amd64_pci_driver.driver);
-               if (err == 0 && agp_bridges_found == 0)
-                       err = -ENODEV;
+               dev = NULL;
+               err = -ENODEV;
+               for_each_pci_dev(dev) {
+                       if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
+                               continue;
+                       /* Only one bridge supported right now */
+                       if (agp_amd64_probe(dev, NULL) == 0) {
+                               err = 0;
+                               break;
+                       }
+               }
        }
        return err;
 }
index dc8a6f70483b51a8f1c1945959fcf4cbc552bfaa..9047b2714653c54ef3c0130f30e09cd98e0bc534 100644 (file)
@@ -488,8 +488,9 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
        handle = obj;
        do {
                status = acpi_get_object_info(handle, &info);
-               if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
+               if (ACPI_SUCCESS(status)) {
                        /* TBD check _CID also */
+                       info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0';
                        match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
                        kfree(info);
                        if (match) {
index b8e02190bef4302f2fc3d490b7cdb1bc54d5e2e9..4dcfef05045a0822ab5a8054ac6300fe086860f8 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
-#include <asm/smp.h>
 #include "agp.h"
 
 /*
@@ -816,6 +815,12 @@ static void intel_i830_setup_flush(void)
                intel_i830_fini_flush();
 }
 
+static void
+do_wbinvd(void *null)
+{
+       wbinvd();
+}
+
 /* The chipset_flush interface needs to get data that has already been
  * flushed out of the CPU all the way out to main memory, because the GPU
  * doesn't snoop those buffers.
@@ -832,10 +837,12 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
 
        memset(pg, 0, 1024);
 
-       if (cpu_has_clflush)
+       if (cpu_has_clflush) {
                clflush_cache_range(pg, 1024);
-       else if (wbinvd_on_all_cpus() != 0)
-               printk(KERN_ERR "Timed out waiting for cache flush.\n");
+       } else {
+               if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+                       printk(KERN_ERR "Timed out waiting for cache flush.\n");
+       }
 }
 
 /* The intel i830 automatically initializes the agp aperture during POST.
index 95fdd4d3a6b1a310e1b987d5d2196626b148e37b..6c3837a0184dd8ca0f56ca0c12f90243a0914dd8 100644 (file)
@@ -415,6 +415,14 @@ static struct pci_device_id agp_sis_pci_table[] = {
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
        },
+       {
+               .class          = (PCI_CLASS_BRIDGE_HOST << 8),
+               .class_mask     = ~0,
+               .vendor         = PCI_VENDOR_ID_SI,
+               .device         = PCI_DEVICE_ID_SI_760,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+       },
        { }
 };
 
index 006466d2a830a34850f6be195cf2c454266b2cf8..70a770ac013875a3e4b5200b954a21197623e2e7 100644 (file)
@@ -476,21 +476,6 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
        if (irq) {
                unsigned long irq_flags;
 
-               if (devp->hd_flags & HPET_SHARED_IRQ) {
-                       /*
-                        * To prevent the interrupt handler from seeing an
-                        * unwanted interrupt status bit, program the timer
-                        * so that it will not fire in the near future ...
-                        */
-                       writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
-                              &timer->hpet_config);
-                       write_counter(read_counter(&hpet->hpet_mc),
-                                     &timer->hpet_compare);
-                       /* ... and clear any left-over status. */
-                       isr = 1 << (devp - devp->hd_hpets->hp_dev);
-                       writel(isr, &hpet->hpet_isr);
-               }
-
                sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
                irq_flags = devp->hd_flags & HPET_SHARED_IRQ
                                                ? IRQF_SHARED : IRQF_DISABLED;
@@ -985,8 +970,6 @@ static int hpet_acpi_add(struct acpi_device *device)
                return -ENODEV;
 
        if (!data.hd_address || !data.hd_nirqs) {
-               if (data.hd_address)
-                       iounmap(data.hd_address);
                printk("%s: no address or irqs in _CRS\n", __func__);
                return -ENODEV;
        }
index abae8c99259411c6454261b9d01d02b0f9d3ef92..d2e698096ace182698152e2366d7c2dc2cf91342 100644 (file)
@@ -310,14 +310,9 @@ static void deliver_recv_msg(struct smi_info *smi_info,
 {
        /* Deliver the message to the upper layer with the lock
           released. */
-
-       if (smi_info->run_to_completion) {
-               ipmi_smi_msg_received(smi_info->intf, msg);
-       } else {
-               spin_unlock(&(smi_info->si_lock));
-               ipmi_smi_msg_received(smi_info->intf, msg);
-               spin_lock(&(smi_info->si_lock));
-       }
+       spin_unlock(&(smi_info->si_lock));
+       ipmi_smi_msg_received(smi_info->intf, msg);
+       spin_lock(&(smi_info->si_lock));
 }
 
 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
index ba1db9b601d2ba43ef7f7f5e84297f79a2761dfc..6df298845b3aeccc9575758a968e1449b8223164 100644 (file)
@@ -837,11 +837,10 @@ static const struct file_operations zero_fops = {
 /*
  * capabilities for /dev/zero
  * - permits private mappings, "copies" are taken of the source of zeros
- * - no writeback happens
  */
 static struct backing_dev_info zero_bdi = {
        .name           = "char/mem",
-       .capabilities   = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
+       .capabilities   = BDI_CAP_MAP_COPY,
 };
 
 static const struct file_operations full_fops = {
index 71f0d72fc2f3af5e3e998b404e2a46d671be4251..88cee4099be940c294127d0a172996d90c597912 100644 (file)
@@ -265,16 +265,10 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
        unsigned char contents[NVRAM_BYTES];
        unsigned i = *ppos;
        unsigned char *tmp;
+       int len;
 
-       if (i >= NVRAM_BYTES)
-               return 0;       /* Past EOF */
-
-       if (count > NVRAM_BYTES - i)
-               count = NVRAM_BYTES - i;
-       if (count > NVRAM_BYTES)
-               return -EFAULT; /* Can't happen, but prove it to gcc */
-
-       if (copy_from_user(contents, buf, count))
+       len = (NVRAM_BYTES - i) < count ? (NVRAM_BYTES - i) : count;
+       if (copy_from_user(contents, buf, len))
                return -EFAULT;
 
        spin_lock_irq(&rtc_lock);
@@ -282,7 +276,7 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
        if (!__nvram_check_checksum())
                goto checksum_err;
 
-       for (tmp = contents; count--; ++i, ++tmp)
+       for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
                __nvram_write_byte(*tmp, i);
 
        __nvram_set_checksum();
index a08c8994c89d9975257f3658d3376b54cd222334..caf6e4d194696204fa41632e220369a2ca2876a7 100644 (file)
@@ -4164,8 +4164,6 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        if (cmd != SIOCWANDEV)
                return hdlc_ioctl(dev, ifr, cmd);
 
-       memset(&new_line, 0, size);
-
        switch(ifr->ifr_settings.type) {
        case IF_GET_IFACE: /* return current sync_serial_settings */
 
index 9abc3a19d53a033e7ff64dd5e68306fc97a2ddb4..64acd05f71c8743330e47a1d9883fc59acc22081 100644 (file)
@@ -247,7 +247,6 @@ static const struct file_operations raw_fops = {
        .aio_read =     generic_file_aio_read,
        .write  =       do_sync_write,
        .aio_write =    blkdev_aio_write,
-       .fsync  =       block_fsync,
        .open   =       raw_open,
        .release=       raw_release,
        .ioctl  =       raw_ioctl,
index 792868d24f2a0f2ad6bc21a4967ace01a2f84d92..8e00b4ddd0830699bca8bab65c3e855657b21978 100644 (file)
@@ -224,7 +224,6 @@ struct      tpm_readpubek_params_out {
        u8      algorithm[4];
        u8      encscheme[2];
        u8      sigscheme[2];
-       __be32  paramsize;
        u8      parameters[12]; /*assuming RSA*/
        __be32  keysize;
        u8      modulus[256];
index ca15c04e5a8d9601cfbce2bc224487b10847ed66..0b73e4ec1addafff42a79752a666fecd4b25d665 100644 (file)
@@ -257,10 +257,6 @@ out:
        return size;
 }
 
-static int itpm;
-module_param(itpm, bool, 0444);
-MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
-
 /*
  * If interrupts are used (signaled by an irq set in the vendor structure)
  * tpm.c can skip polling for the data to be available as the interrupt is
@@ -297,7 +293,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
                wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                              &chip->vendor.int_queue);
                status = tpm_tis_status(chip);
-               if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
+               if ((status & TPM_STS_DATA_EXPECT) == 0) {
                        rc = -EIO;
                        goto out_err;
                }
@@ -471,10 +467,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
                 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
 
-       if (itpm)
-               dev_info(dev, "Intel iTPM workaround enabled\n");
-
-
        /* Figure out the capabilities */
        intfcaps =
            ioread32(chip->vendor.iobase +
@@ -622,14 +614,7 @@ static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
 
 static int tpm_tis_pnp_resume(struct pnp_dev *dev)
 {
-       struct tpm_chip *chip = pnp_get_drvdata(dev);
-       int ret;
-
-       ret = tpm_pm_resume(&dev->dev);
-       if (!ret)
-               tpm_continue_selftest(chip);
-
-       return ret;
+       return tpm_pm_resume(&dev->dev);
 }
 
 static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
@@ -644,7 +629,6 @@ static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
        {"", 0},                /* User Specified */
        {"", 0}                 /* Terminator */
 };
-MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
 
 static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
 {
index 9605ee5b931e4ea98bd461d45ddd75c5dfebf7d6..66fa4e10d76bd47e1192a4593a488e8491e09b68 100644 (file)
@@ -247,8 +247,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
 {
        int copied = 0;
        do {
-               int goal = min(size - copied, TTY_BUFFER_PAGE);
-               int space = tty_buffer_request_room(tty, goal);
+               int space = tty_buffer_request_room(tty, size - copied);
                struct tty_buffer *tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0))
@@ -284,8 +283,7 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
 {
        int copied = 0;
        do {
-               int goal = min(size - copied, TTY_BUFFER_PAGE);
-               int space = tty_buffer_request_room(tty, goal);
+               int space = tty_buffer_request_room(tty, size - copied);
                struct tty_buffer *tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0))
@@ -412,8 +410,7 @@ static void flush_to_ldisc(struct work_struct *work)
        spin_lock_irqsave(&tty->buf.lock, flags);
 
        if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
-               struct tty_buffer *head, *tail = tty->buf.tail;
-               int seen_tail = 0;
+               struct tty_buffer *head;
                while ((head = tty->buf.head) != NULL) {
                        int count;
                        char *char_buf;
@@ -423,15 +420,6 @@ static void flush_to_ldisc(struct work_struct *work)
                        if (!count) {
                                if (head->next == NULL)
                                        break;
-                               /*
-                                 There's a possibility tty might get new buffer
-                                 added during the unlock window below. We could
-                                 end up spinning in here forever hogging the CPU
-                                 completely. To avoid this let's have a rest each
-                                 time we processed the tail buffer.
-                               */
-                               if (tail == head)
-                                       seen_tail = 1;
                                tty->buf.head = head->next;
                                tty_buffer_free(tty, head);
                                continue;
@@ -441,7 +429,7 @@ static void flush_to_ldisc(struct work_struct *work)
                           line discipline as we want to empty the queue */
                        if (test_bit(TTY_FLUSHPENDING, &tty->flags))
                                break;
-                       if (!tty->receive_room || seen_tail) {
+                       if (!tty->receive_room) {
                                schedule_delayed_work(&tty->buf.work, 1);
                                break;
                        }
index 53ffcfc1154bef0b72f33fc362905c5a6c250d35..05cab2cea85ed6a4022519d0000b9980c4e08d5b 100644 (file)
@@ -1408,8 +1408,6 @@ static void release_one_tty(struct work_struct *work)
        list_del_init(&tty->tty_files);
        file_list_unlock();
 
-       put_pid(tty->pgrp);
-       put_pid(tty->session);
        free_tty_struct(tty);
 }
 
index 8b9f1a5c8be87e63ae41794317915d4744eb368a..feb55075819bab4f7d9f75ce11f7aa7232ba9dc1 100644 (file)
@@ -45,7 +45,6 @@
 
 static DEFINE_SPINLOCK(tty_ldisc_lock);
 static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
-static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
 /* Line disc dispatch table */
 static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
 
@@ -82,7 +81,6 @@ static void put_ldisc(struct tty_ldisc *ld)
                return;
        }
        local_irq_restore(flags);
-       wake_up(&tty_ldisc_idle);
 }
 
 /**
@@ -444,14 +442,9 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
 
 static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
 {
-       int ret;
-
        WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
-       if (ld->ops->open) {
-               ret = ld->ops->open(tty);
-               if (ret)
-                       clear_bit(TTY_LDISC_OPEN, &tty->flags);
-       }
+       if (ld->ops->open)
+               return ld->ops->open(tty);
        return 0;
 }
 
@@ -528,23 +521,6 @@ static int tty_ldisc_halt(struct tty_struct *tty)
        return cancel_delayed_work_sync(&tty->buf.work);
 }
 
-/**
- *     tty_ldisc_wait_idle     -       wait for the ldisc to become idle
- *     @tty: tty to wait for
- *
- *     Wait for the line discipline to become idle. The discipline must
- *     have been halted for this to guarantee it remains idle.
- */
-static int tty_ldisc_wait_idle(struct tty_struct *tty)
-{
-       int ret;
-       ret = wait_event_interruptible_timeout(tty_ldisc_idle,
-                       atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
-       if (ret < 0)
-               return ret;
-       return ret > 0 ? 0 : -EBUSY;
-}
-
 /**
  *     tty_set_ldisc           -       set line discipline
  *     @tty: the terminal to set
@@ -640,16 +616,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        flush_scheduled_work();
 
-       retval = tty_ldisc_wait_idle(tty);
-
        mutex_lock(&tty->ldisc_mutex);
-
-       /* handle wait idle failure locked */
-       if (retval) {
-               tty_ldisc_put(new_ldisc);
-               goto enable;
-       }
-
        if (test_bit(TTY_HUPPED, &tty->flags)) {
                /* We were raced by the hangup method. It will have stomped
                   the ldisc data and closed the ldisc down */
@@ -682,7 +649,6 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        tty_ldisc_put(o_ldisc);
 
-enable:
        /*
         *      Allow ldisc referencing to occur again
         */
@@ -721,18 +687,14 @@ static void tty_reset_termios(struct tty_struct *tty)
 /**
  *     tty_ldisc_reinit        -       reinitialise the tty ldisc
  *     @tty: tty to reinit
- *     @ldisc: line discipline to reinitialize
  *
- *     Switch the tty to a line discipline and leave the ldisc
- *     state closed
+ *     Switch the tty back to N_TTY line discipline and leave the
+ *     ldisc state closed
  */
 
-static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
+static void tty_ldisc_reinit(struct tty_struct *tty)
 {
-       struct tty_ldisc *ld = tty_ldisc_get(ldisc);
-
-       if (IS_ERR(ld))
-               return -1;
+       struct tty_ldisc *ld;
 
        tty_ldisc_close(tty, tty->ldisc);
        tty_ldisc_put(tty->ldisc);
@@ -740,10 +702,10 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
        /*
         *      Switch the line discipline back
         */
+       ld = tty_ldisc_get(N_TTY);
+       BUG_ON(IS_ERR(ld));
        tty_ldisc_assign(tty, ld);
-       tty_set_termios_ldisc(tty, ldisc);
-
-       return 0;
+       tty_set_termios_ldisc(tty, N_TTY);
 }
 
 /**
@@ -764,8 +726,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
 void tty_ldisc_hangup(struct tty_struct *tty)
 {
        struct tty_ldisc *ld;
-       int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
-       int err = 0;
 
        /*
         * FIXME! What are the locking issues here? This may me overdoing
@@ -793,35 +753,25 @@ void tty_ldisc_hangup(struct tty_struct *tty)
        wake_up_interruptible_poll(&tty->read_wait, POLLIN);
        /*
         * Shutdown the current line discipline, and reset it to
-        * N_TTY if need be.
-        *
-        * Avoid racing set_ldisc or tty_ldisc_release
+        * N_TTY.
         */
-       mutex_lock(&tty->ldisc_mutex);
-       tty_ldisc_halt(tty);
-       /* At this point we have a closed ldisc and we want to
-          reopen it. We could defer this to the next open but
-          it means auditing a lot of other paths so this is
-          a FIXME */
-       if (tty->ldisc) {       /* Not yet closed */
-               if (reset == 0) {
-
-                       if (!tty_ldisc_reinit(tty, tty->termios->c_line))
-                               err = tty_ldisc_open(tty, tty->ldisc);
-                       else
-                               err = 1;
-               }
-               /* If the re-open fails or we reset then go to N_TTY. The
-                  N_TTY open cannot fail */
-               if (reset || err) {
-                       BUG_ON(tty_ldisc_reinit(tty, N_TTY));
+       if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+               /* Avoid racing set_ldisc or tty_ldisc_release */
+               mutex_lock(&tty->ldisc_mutex);
+               tty_ldisc_halt(tty);
+               if (tty->ldisc) {       /* Not yet closed */
+                       /* Switch back to N_TTY */
+                       tty_ldisc_reinit(tty);
+                       /* At this point we have a closed ldisc and we want to
+                          reopen it. We could defer this to the next open but
+                          it means auditing a lot of other paths so this is
+                          a FIXME */
                        WARN_ON(tty_ldisc_open(tty, tty->ldisc));
+                       tty_ldisc_enable(tty);
                }
-               tty_ldisc_enable(tty);
-       }
-       mutex_unlock(&tty->ldisc_mutex);
-       if (reset)
+               mutex_unlock(&tty->ldisc_mutex);
                tty_reset_termios(tty);
+       }
 }
 
 /**
index 6351a2625dfe3783d7eac259a4d5e89548a34594..6aa10284104aeb6e4bc3a22833ea347e67c5d976 100644 (file)
@@ -503,7 +503,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        struct kbd_struct * kbd;
        unsigned int console;
        unsigned char ucval;
-       unsigned int uival;
        void __user *up = (void __user *)arg;
        int i, perm;
        int ret = 0;
@@ -658,7 +657,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                break;
 
        case KDGETMODE:
-               uival = vc->vc_mode;
+               ucval = vc->vc_mode;
                goto setint;
 
        case KDMAPDISP:
@@ -696,7 +695,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                break;
 
        case KDGKBMODE:
-               uival = ((kbd->kbdmode == VC_RAW) ? K_RAW :
+               ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW :
                                 (kbd->kbdmode == VC_MEDIUMRAW) ? K_MEDIUMRAW :
                                 (kbd->kbdmode == VC_UNICODE) ? K_UNICODE :
                                 K_XLATE);
@@ -718,9 +717,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                break;
 
        case KDGKBMETA:
-               uival = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
+               ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
        setint:
-               ret = put_user(uival, (int __user *)arg);
+               ret = put_user(ucval, (int __user *)arg);
                break;
 
        case KDGETKEYCODE:
@@ -950,7 +949,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                for (i = 0; i < MAX_NR_CONSOLES; ++i)
                        if (! VT_IS_IN_USE(i))
                                break;
-               uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
+               ucval = i < MAX_NR_CONSOLES ? (i+1) : -1;
                goto setint;             
 
        /*
index 234d9f6bc69692b7d987500a9fd5de4d99772912..6b3e0c2f33e2b193838fbecc0dda72aa9f0db5ef 100644 (file)
@@ -413,10 +413,18 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
 static int sh_cmt_clocksource_enable(struct clocksource *cs)
 {
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+       int ret;
 
        p->total_cycles = 0;
 
-       return sh_cmt_start(p, FLAG_CLOCKSOURCE);
+       ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
+       if (ret)
+               return ret;
+
+       /* TODO: calculate good shift from rate and counter bit width */
+       cs->shift = 0;
+       cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+       return 0;
 }
 
 static void sh_cmt_clocksource_disable(struct clocksource *cs)
@@ -436,18 +444,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
        cs->disable = sh_cmt_clocksource_disable;
        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
-
-       /* clk_get_rate() needs an enabled clock */
-       clk_enable(p->clk);
-       p->rate = clk_get_rate(p->clk) / (p->width == 16) ? 512 : 8;
-       clk_disable(p->clk);
-
-       /* TODO: calculate good shift from rate and counter bit width */
-       cs->shift = 10;
-       cs->mult = clocksource_hz2mult(p->rate, cs->shift);
-
        pr_info("sh_cmt: %s used as clock source\n", cs->name);
-
        clocksource_register(cs);
        return 0;
 }
@@ -606,13 +603,18 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
        p->irqaction.handler = sh_cmt_interrupt;
        p->irqaction.dev_id = p;
        p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+       ret = setup_irq(irq, &p->irqaction);
+       if (ret) {
+               pr_err("sh_cmt: failed to request irq %d\n", irq);
+               goto err1;
+       }
 
        /* get hold of clock */
        p->clk = clk_get(&p->pdev->dev, cfg->clk);
        if (IS_ERR(p->clk)) {
                pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
                ret = PTR_ERR(p->clk);
-               goto err1;
+               goto err2;
        }
 
        if (resource_size(res) == 6) {
@@ -625,25 +627,14 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
                p->clear_bits = ~0xc000;
        }
 
-       ret = sh_cmt_register(p, cfg->name,
-                             cfg->clockevent_rating,
-                             cfg->clocksource_rating);
-       if (ret) {
-               pr_err("sh_cmt: registration failed\n");
-               goto err1;
-       }
-
-       ret = setup_irq(irq, &p->irqaction);
-       if (ret) {
-               pr_err("sh_cmt: failed to request irq %d\n", irq);
-               goto err1;
-       }
-
-       return 0;
-
-err1:
+       return sh_cmt_register(p, cfg->name,
+                              cfg->clockevent_rating,
+                              cfg->clocksource_rating);
+ err2:
+       remove_irq(irq, &p->irqaction);
+ err1:
        iounmap(p->mapbase);
-err0:
+ err0:
        return ret;
 }
 
index 4c8a759e60cdb56c14cf57002a9672f81100822e..973e714d605147d7e2110eaae48a25b780d32bc7 100644 (file)
@@ -221,15 +221,15 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
        ced->cpumask = cpumask_of(0);
        ced->set_mode = sh_mtu2_clock_event_mode;
 
-       pr_info("sh_mtu2: %s used for clock events\n", ced->name);
-       clockevents_register_device(ced);
-
        ret = setup_irq(p->irqaction.irq, &p->irqaction);
        if (ret) {
                pr_err("sh_mtu2: failed to request irq %d\n",
                       p->irqaction.irq);
                return;
        }
+
+       pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+       clockevents_register_device(ced);
 }
 
 static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
index c0732466fb8749a5a5f81d67e3e5245b48eb3227..93c2322feab79e8e119d0b51949ac0b1bc42135d 100644 (file)
@@ -199,8 +199,16 @@ static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
 static int sh_tmu_clocksource_enable(struct clocksource *cs)
 {
        struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+       int ret;
+
+       ret = sh_tmu_enable(p);
+       if (ret)
+               return ret;
 
-       return sh_tmu_enable(p);
+       /* TODO: calculate good shift from rate and counter bit width */
+       cs->shift = 10;
+       cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+       return 0;
 }
 
 static void sh_tmu_clocksource_disable(struct clocksource *cs)
@@ -220,16 +228,6 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
        cs->disable = sh_tmu_clocksource_disable;
        cs->mask = CLOCKSOURCE_MASK(32);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
-
-       /* clk_get_rate() needs an enabled clock */
-       clk_enable(p->clk);
-       /* channel will be configured at parent clock / 4 */
-       p->rate = clk_get_rate(p->clk) / 4;
-       clk_disable(p->clk);
-       /* TODO: calculate good shift from rate and counter bit width */
-       cs->shift = 10;
-       cs->mult = clocksource_hz2mult(p->rate, cs->shift);
-
        pr_info("sh_tmu: %s used as clock source\n", cs->name);
        clocksource_register(cs);
        return 0;
@@ -325,15 +323,15 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_mode = sh_tmu_clock_event_mode;
 
-       pr_info("sh_tmu: %s used for clock events\n", ced->name);
-       clockevents_register_device(ced);
-
        ret = setup_irq(p->irqaction.irq, &p->irqaction);
        if (ret) {
                pr_err("sh_tmu: failed to request irq %d\n",
                       p->irqaction.irq);
                return;
        }
+
+       pr_info("sh_tmu: %s used for clock events\n", ced->name);
+       clockevents_register_device(ced);
 }
 
 static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
index c18e65e572c7c3144440bad43ade9a7d507fc4b0..ff57c40e9b8b94905ca090e79698390a66ad2583 100644 (file)
@@ -1741,8 +1741,17 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
                        dprintk("governor switch\n");
 
                        /* end old governor */
-                       if (data->governor)
+                       if (data->governor) {
+                               /*
+                                * Need to release the rwsem around governor
+                                * stop due to lock dependency between
+                                * cancel_delayed_work_sync and the read lock
+                                * taken in the delayed work handler.
+                                */
+                               unlock_policy_rwsem_write(data->cpu);
                                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+                               lock_policy_rwsem_write(data->cpu);
+                       }
 
                        /* start new governor */
                        data->governor = policy->governor;
index f8e57c6303f2c17860d7f7700975ed5502743cb7..73655aeb3a60993e28fbbf9ea7930128e5a23407 100644 (file)
@@ -101,6 +101,7 @@ struct menu_device {
 
        unsigned int    expected_us;
        u64             predicted_us;
+       unsigned int    measured_us;
        unsigned int    exit_us;
        unsigned int    bucket;
        u64             correction_factor[BUCKETS];
@@ -186,14 +187,14 @@ static int menu_select(struct cpuidle_device *dev)
        int i;
        int multiplier;
 
+       data->last_state_idx = 0;
+       data->exit_us = 0;
+
        if (data->needs_update) {
                menu_update(dev);
                data->needs_update = 0;
        }
 
-       data->last_state_idx = 0;
-       data->exit_us = 0;
-
        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
                return 0;
@@ -293,7 +294,7 @@ static void menu_update(struct cpuidle_device *dev)
        new_factor = data->correction_factor[data->bucket]
                        * (DECAY - 1) / DECAY;
 
-       if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
+       if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
                new_factor += RESOLUTION * measured_us / data->expected_us;
        else
                /*
index 71e64824e8f73c1f88262d04d970d169f1c64a68..84c51e17726966c196ac53ce05671e972f65b98a 100644 (file)
@@ -285,7 +285,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
        if (initial)
                asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                              : "+S" (input), "+D" (output), "+a" (iv)
-                             : "d" (control_word), "b" (key), "c" (initial));
+                             : "d" (control_word), "b" (key), "c" (count));
 
        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                      : "+S" (input), "+D" (output), "+a" (iv)
index f2b44d51d826cec394890d1578606c67e2fa7d11..466ab10c1ff10de1d001178fde9fdce203c410e3 100644 (file)
@@ -161,7 +161,7 @@ static int mv_is_err_intr(u32 intr_cause)
 
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
-       u32 val = ~(1 << (chan->idx * 16));
+       u32 val = (1 << (1 + (chan->idx * 16)));
        dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
        __raw_writel(val, XOR_INTR_CAUSE(chan));
 }
index 2aa339e94968e5294cb22e2a570719d82639f18a..01bc8e232456a665675b183946ef0f056cb96548 100644 (file)
@@ -156,7 +156,7 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
 
        default:
                amd64_printk(KERN_ERR, "Unsupported family!\n");
-               return -EINVAL;
+               break;
        }
        return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
                        min_scrubrate);
@@ -1491,7 +1491,7 @@ static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
        u64 chan_off;
 
        if (hi_range_sel) {
-               if (!(dct_sel_base_addr & 0xFFFF0000) &&
+               if (!(dct_sel_base_addr & 0xFFFFF800) &&
                   hole_valid && (sys_addr >= 0x100000000ULL))
                        chan_off = hole_off << 16;
                else
index 1999807f078ff51b8157d65a258f267c6626b93e..713ed7d372475dc325ac7cfca8cc5e22181af29b 100644 (file)
@@ -295,6 +295,7 @@ wrong_ls_mce:
 void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
 {
        u32 ec  = ERROR_CODE(regs->nbsl);
+       u32 xec = EXT_ERROR_CODE(regs->nbsl);
 
        if (!handle_errors)
                return;
@@ -310,15 +311,11 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
                if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
                        pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
        } else {
-               u8 assoc_cpus = regs->nbsh & 0xf;
-
-               if (assoc_cpus > 0)
-                       pr_cont(", core: %d", fls(assoc_cpus) - 1);
-
-               pr_cont("\n");
+               pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf)));
        }
 
-       pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl));
+
+       pr_emerg("%s.\n", EXT_ERR_MSG(xec));
 
        if (BUS_ERROR(ec) && nb_bus_decoder)
                nb_bus_decoder(node_id, regs);
@@ -381,7 +378,7 @@ static void amd_decode_mce(struct mce *m)
                 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
 
        /* do the two bits[14:13] together */
-       ecc = (m->status >> 45) & 0x3;
+       ecc = m->status & (3ULL << 45);
        if (ecc)
                pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
 
index 3fc2ceb6d715c8aa224f3455a3e4c27a27c934ab..ed635ae2b5d3d785e03118d2d48835c7448aff5b 100644 (file)
@@ -239,7 +239,7 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
 static void fw_card_bm_work(struct work_struct *work)
 {
        struct fw_card *card = container_of(work, struct fw_card, work.work);
-       struct fw_device *root_device, *irm_device;
+       struct fw_device *root_device;
        struct fw_node *root_node;
        unsigned long flags;
        int root_id, new_root_id, irm_id, local_id;
@@ -247,7 +247,6 @@ static void fw_card_bm_work(struct work_struct *work)
        bool do_reset = false;
        bool root_device_is_running;
        bool root_device_is_cmc;
-       bool irm_is_1394_1995_only;
 
        spin_lock_irqsave(&card->lock, flags);
 
@@ -257,18 +256,12 @@ static void fw_card_bm_work(struct work_struct *work)
        }
 
        generation = card->generation;
-
        root_node = card->root_node;
        fw_node_get(root_node);
        root_device = root_node->data;
        root_device_is_running = root_device &&
                        atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
        root_device_is_cmc = root_device && root_device->cmc;
-
-       irm_device = card->irm_node->data;
-       irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
-                       (irm_device->config_rom[2] & 0x000000f0) == 0;
-
        root_id  = root_node->node_id;
        irm_id   = card->irm_node->node_id;
        local_id = card->local_node->node_id;
@@ -291,15 +284,8 @@ static void fw_card_bm_work(struct work_struct *work)
 
                if (!card->irm_node->link_on) {
                        new_root_id = local_id;
-                       fw_notify("%s, making local node (%02x) root.\n",
-                                 "IRM has link off", new_root_id);
-                       goto pick_me;
-               }
-
-               if (irm_is_1394_1995_only) {
-                       new_root_id = local_id;
-                       fw_notify("%s, making local node (%02x) root.\n",
-                                 "IRM is not 1394a compliant", new_root_id);
+                       fw_notify("IRM has link off, making local node (%02x) root.\n",
+                                 new_root_id);
                        goto pick_me;
                }
 
@@ -338,8 +324,8 @@ static void fw_card_bm_work(struct work_struct *work)
                         * root, and thus, IRM.
                         */
                        new_root_id = local_id;
-                       fw_notify("%s, making local node (%02x) root.\n",
-                                 "BM lock failed", new_root_id);
+                       fw_notify("BM lock failed, making local node (%02x) root.\n",
+                                 new_root_id);
                        goto pick_me;
                }
        } else if (card->bm_generation != generation) {
index 4560d8ffa171718b498480b66d8384be9c7deb8e..5089331544ed5336e682fdec039be94068314102 100644 (file)
@@ -1299,24 +1299,24 @@ static int dispatch_ioctl(struct client *client,
        int ret;
 
        if (_IOC_TYPE(cmd) != '#' ||
-           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
-           _IOC_SIZE(cmd) > sizeof(buffer))
+           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
                return -EINVAL;
 
-       if (_IOC_DIR(cmd) == _IOC_READ)
-               memset(&buffer, 0, _IOC_SIZE(cmd));
-
-       if (_IOC_DIR(cmd) & _IOC_WRITE)
-               if (copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+       if (_IOC_DIR(cmd) & _IOC_WRITE) {
+               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+                   copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
+       }
 
        ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
        if (ret < 0)
                return ret;
 
-       if (_IOC_DIR(cmd) & _IOC_READ)
-               if (copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+       if (_IOC_DIR(cmd) & _IOC_READ) {
+               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+                   copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;
+       }
 
        return ret;
 }
index 17e2b1740202a3921edde10eeeec757cdaedd5e5..9d0dfcbe2c1c82561bf1f026bac88f08324b3037 100644 (file)
@@ -463,7 +463,6 @@ static int read_bus_info_block(struct fw_device *device, int generation)
                return -ENOMEM;
 
        stack = &rom[READ_BIB_ROM_SIZE];
-       memset(rom, 0, sizeof(*rom) * READ_BIB_ROM_SIZE);
 
        device->max_speed = SCODE_100;
 
index 8e7a100a4b6d8c2174016abe8787b55f81008fc8..720b39b0b4edb0da6b3501d311c9165e628ccca5 100644 (file)
@@ -628,7 +628,7 @@ static void ar_context_tasklet(unsigned long data)
        d = &ab->descriptor;
 
        if (d->res_count == 0) {
-               size_t size, size2, rest, pktsize, size3, offset;
+               size_t size, rest, offset;
                dma_addr_t start_bus;
                void *start;
 
@@ -639,61 +639,25 @@ static void ar_context_tasklet(unsigned long data)
                 */
 
                offset = offsetof(struct ar_buffer, data);
-               start = ab;
+               start = buffer = ab;
                start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-               buffer = ab->data;
 
                ab = ab->next;
                d = &ab->descriptor;
-               size = start + PAGE_SIZE - ctx->pointer;
-               /* valid buffer data in the next page */
+               size = buffer + PAGE_SIZE - ctx->pointer;
                rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
-               /* what actually fits in this page */
-               size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
                memmove(buffer, ctx->pointer, size);
-               memcpy(buffer + size, ab->data, size2);
-
-               while (size > 0) {
-                       void *next = handle_ar_packet(ctx, buffer);
-                       pktsize = next - buffer;
-                       if (pktsize >= size) {
-                               /*
-                                * We have handled all the data that was
-                                * originally in this page, so we can now
-                                * continue in the next page.
-                                */
-                               buffer = next;
-                               break;
-                       }
-                       /* move the next packet to the start of the buffer */
-                       memmove(buffer, next, size + size2 - pktsize);
-                       size -= pktsize;
-                       /* fill up this page again */
-                       size3 = min(rest - size2,
-                                   (size_t)PAGE_SIZE - offset - size - size2);
-                       memcpy(buffer + size + size2,
-                              (void *) ab->data + size2, size3);
-                       size2 += size3;
-               }
-
-               if (rest > 0) {
-                       /* handle the packets that are fully in the next page */
-                       buffer = (void *) ab->data +
-                                       (buffer - (start + offset + size));
-                       end = (void *) ab->data + rest;
+               memcpy(buffer + size, ab->data, rest);
+               ctx->current_buffer = ab;
+               ctx->pointer = (void *) ab->data + rest;
+               end = buffer + size + rest;
 
-                       while (buffer < end)
-                               buffer = handle_ar_packet(ctx, buffer);
-
-                       ctx->current_buffer = ab;
-                       ctx->pointer = end;
+               while (buffer < end)
+                       buffer = handle_ar_packet(ctx, buffer);
 
-                       dma_free_coherent(ohci->card.device, PAGE_SIZE,
-                                         start, start_bus);
-                       ar_context_add_page(ctx);
-               } else {
-                       ctx->pointer = start + PAGE_SIZE;
-               }
+               dma_free_coherent(ohci->card.device, PAGE_SIZE,
+                                 start, start_bus);
+               ar_context_add_page(ctx);
        } else {
                buffer = ctx->pointer;
                ctx->pointer = end =
index 382ae877f3700c17453655fc2d5753c15bb9f9e9..fdfca9e7cfe6564c833e6715bd0c6f731cbc24ff 100755 (executable)
@@ -85,17 +85,17 @@ static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset)
        return !((ret>>offset)^gpn_pol);
 }
 
-static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm831x_gpio_direction_out(struct gpio_chip *chip,
+                                    unsigned offset, int value)
 {
        struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
        struct wm831x *wm831x = wm831x_gpio->wm831x;
 
-       wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
-                       value << offset);
+       return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
+                              WM831X_GPN_DIR | WM831X_GPN_TRI, 0);
 }
 
-static int wm831x_gpio_direction_out(struct gpio_chip *chip,
-                                    unsigned offset, int value)
+static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
        struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
        struct wm831x *wm831x = wm831x_gpio->wm831x;
@@ -114,7 +114,8 @@ static int wm831x_gpio_direction_out(struct gpio_chip *chip,
        /* Can only set GPIO state once it's in output mode */
        wm831x_gpio_set(chip, offset, value);
 
-       return 0;
+       wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
+                       value << offset);
 }
 
 static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
index 1b8745dfd457cb09552a3d8265871c0dda23a65b..afed886cb0fef0dcd544c2b0d9bad72a3a52593c 100644 (file)
@@ -104,7 +104,6 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
        if (connector->status == connector_status_disconnected) {
                DRM_DEBUG_KMS("%s is disconnected\n",
                          drm_get_connector_name(connector));
-               drm_mode_connector_update_edid_property(connector, NULL);
                goto prune;
        }
 
@@ -925,13 +924,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                mode_changed = true;
 
        if (mode_changed) {
+               old_fb = set->crtc->fb;
+               set->crtc->fb = set->fb;
                set->crtc->enabled = (set->mode != NULL);
                if (set->mode != NULL) {
                        DRM_DEBUG_KMS("attempting to set mode from"
                                        " userspace\n");
                        drm_mode_debug_printmodeline(set->mode);
-                       old_fb = set->crtc->fb;
-                       set->crtc->fb = set->fb;
                        if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
                                                      set->x, set->y,
                                                      old_fb)) {
index 0e27d98b2b9fe02482383337530771b116e6115d..a75ca63deea65c09d5b5b0d46265daf9482157fe 100644 (file)
@@ -470,9 +470,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
                                retcode = -EFAULT;
                                goto err_i1;
                        }
-               } else
-                       memset(kdata, 0, _IOC_SIZE(cmd));
-
+               }
                retcode = func(dev, kdata, file_priv);
 
                if (cmd & IOC_OUT) {
index 1097dece323cafadbf6c3532aa42cbe9b36a27b5..b54ba63d506e0350abbf53acd50420cbeb79f7c3 100644 (file)
@@ -85,8 +85,6 @@ static struct edid_quirk {
 
        /* Envision Peripherals, Inc. EN-7100e */
        { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
-       /* Envision EN2028 */
-       { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
 
        /* Funai Electronics PM36B */
        { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
@@ -324,7 +322,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 1024x768@85Hz */
        { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
-                  1168, 1376, 0, 768, 769, 772, 808, 0,
+                  1072, 1376, 0, 768, 769, 772, 808, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 1152x864@75Hz */
        { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
@@ -565,8 +563,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
                mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
                                    false);
                mode->hdisplay = 1366;
-               mode->hsync_start = mode->hsync_start - 1;
-               mode->hsync_end = mode->hsync_end - 1;
+               mode->vsync_start = mode->vsync_start - 1;
+               mode->vsync_end = mode->vsync_end - 1;
                return mode;
        }
        mode = NULL;
@@ -655,6 +653,15 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
        mode->vsync_end = mode->vsync_start + vsync_pulse_width;
        mode->vtotal = mode->vdisplay + vblank;
 
+       /* perform the basic check for the detailed timing */
+       if (mode->hsync_end > mode->htotal ||
+               mode->vsync_end > mode->vtotal) {
+               drm_mode_destroy(dev, mode);
+               DRM_DEBUG_KMS("Incorrect detailed timing. "
+                               "Sync is beyond the blank.\n");
+               return NULL;
+       }
+
        /* Some EDIDs have bogus h/vtotal values */
        if (mode->hsync_end > mode->htotal)
                mode->htotal = mode->hsync_end + 1;
@@ -827,57 +834,8 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
        return modes;
 }
 
-static int add_detailed_modes(struct drm_connector *connector,
-                             struct detailed_timing *timing,
-                             struct edid *edid, u32 quirks, int preferred)
-{
-       int i, modes = 0;
-       struct detailed_non_pixel *data = &timing->data.other_data;
-       int timing_level = standard_timing_level(edid);
-       struct drm_display_mode *newmode;
-       struct drm_device *dev = connector->dev;
-
-       if (timing->pixel_clock) {
-               newmode = drm_mode_detailed(dev, edid, timing, quirks);
-               if (!newmode)
-                       return 0;
-
-               if (preferred)
-                       newmode->type |= DRM_MODE_TYPE_PREFERRED;
-
-               drm_mode_probed_add(connector, newmode);
-               return 1;
-       }
-
-       /* other timing types */
-       switch (data->type) {
-       case EDID_DETAIL_MONITOR_RANGE:
-               /* Get monitor range data */
-               break;
-       case EDID_DETAIL_STD_MODES:
-               /* Six modes per detailed section */
-               for (i = 0; i < 6; i++) {
-                       struct std_timing *std;
-                       struct drm_display_mode *newmode;
-
-                       std = &data->data.timings[i];
-                       newmode = drm_mode_std(dev, std, edid->revision,
-                                              timing_level);
-                       if (newmode) {
-                               drm_mode_probed_add(connector, newmode);
-                               modes++;
-                       }
-               }
-               break;
-       default:
-               break;
-       }
-
-       return modes;
-}
-
 /**
- * add_detailed_info - get detailed mode info from EDID data
+ * add_detailed_modes - get detailed mode info from EDID data
  * @connector: attached connector
  * @edid: EDID block to scan
  * @quirks: quirks to apply
@@ -888,24 +846,67 @@ static int add_detailed_modes(struct drm_connector *connector,
 static int add_detailed_info(struct drm_connector *connector,
                             struct edid *edid, u32 quirks)
 {
-       int i, modes = 0;
+       struct drm_device *dev = connector->dev;
+       int i, j, modes = 0;
+       int timing_level;
+
+       timing_level = standard_timing_level(edid);
 
        for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
                struct detailed_timing *timing = &edid->detailed_timings[i];
-               int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+               struct detailed_non_pixel *data = &timing->data.other_data;
+               struct drm_display_mode *newmode;
 
-               /* In 1.0, only timings are allowed */
-               if (!timing->pixel_clock && edid->version == 1 &&
-                       edid->revision == 0)
-                       continue;
+               /* X server check is version 1.1 or higher */
+               if (edid->version == 1 && edid->revision >= 1 &&
+                   !timing->pixel_clock) {
+                       /* Other timing or info */
+                       switch (data->type) {
+                       case EDID_DETAIL_MONITOR_SERIAL:
+                               break;
+                       case EDID_DETAIL_MONITOR_STRING:
+                               break;
+                       case EDID_DETAIL_MONITOR_RANGE:
+                               /* Get monitor range data */
+                               break;
+                       case EDID_DETAIL_MONITOR_NAME:
+                               break;
+                       case EDID_DETAIL_MONITOR_CPDATA:
+                               break;
+                       case EDID_DETAIL_STD_MODES:
+                               for (j = 0; j < 6; i++) {
+                                       struct std_timing *std;
+                                       struct drm_display_mode *newmode;
+
+                                       std = &data->data.timings[j];
+                                       newmode = drm_mode_std(dev, std,
+                                                              edid->revision,
+                                                              timing_level);
+                                       if (newmode) {
+                                               drm_mode_probed_add(connector, newmode);
+                                               modes++;
+                                       }
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+               } else {
+                       newmode = drm_mode_detailed(dev, edid, timing, quirks);
+                       if (!newmode)
+                               continue;
+
+                       /* First detailed mode is preferred */
+                       if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
+                               newmode->type |= DRM_MODE_TYPE_PREFERRED;
+                       drm_mode_probed_add(connector, newmode);
 
-               modes += add_detailed_modes(connector, timing, edid, quirks,
-                                           preferred);
+                       modes++;
+               }
        }
 
        return modes;
 }
-
 /**
  * add_detailed_mode_eedid - get detailed mode info from addtional timing
  *                     EDID block
@@ -919,9 +920,12 @@ static int add_detailed_info(struct drm_connector *connector,
 static int add_detailed_info_eedid(struct drm_connector *connector,
                             struct edid *edid, u32 quirks)
 {
-       int i, modes = 0;
+       struct drm_device *dev = connector->dev;
+       int i, j, modes = 0;
        char *edid_ext = NULL;
        struct detailed_timing *timing;
+       struct detailed_non_pixel *data;
+       struct drm_display_mode *newmode;
        int edid_ext_num;
        int start_offset, end_offset;
        int timing_level;
@@ -972,7 +976,51 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
        for (i = start_offset; i < end_offset;
                        i += sizeof(struct detailed_timing)) {
                timing = (struct detailed_timing *)(edid_ext + i);
-               modes += add_detailed_modes(connector, timing, edid, quirks, 0);
+               data = &timing->data.other_data;
+               /* Detailed mode timing */
+               if (timing->pixel_clock) {
+                       newmode = drm_mode_detailed(dev, edid, timing, quirks);
+                       if (!newmode)
+                               continue;
+
+                       drm_mode_probed_add(connector, newmode);
+
+                       modes++;
+                       continue;
+               }
+
+               /* Other timing or info */
+               switch (data->type) {
+               case EDID_DETAIL_MONITOR_SERIAL:
+                       break;
+               case EDID_DETAIL_MONITOR_STRING:
+                       break;
+               case EDID_DETAIL_MONITOR_RANGE:
+                       /* Get monitor range data */
+                       break;
+               case EDID_DETAIL_MONITOR_NAME:
+                       break;
+               case EDID_DETAIL_MONITOR_CPDATA:
+                       break;
+               case EDID_DETAIL_STD_MODES:
+                       /* Five modes per detailed section */
+                       for (j = 0; j < 5; i++) {
+                               struct std_timing *std;
+                               struct drm_display_mode *newmode;
+
+                               std = &data->data.timings[j];
+                               newmode = drm_mode_std(dev, std,
+                                                      edid->revision,
+                                                      timing_level);
+                               if (newmode) {
+                                       drm_mode_probed_add(connector, newmode);
+                                       modes++;
+                               }
+                       }
+                       break;
+               default:
+                       break;
+               }
        }
 
        return modes;
index ba145531ca031499fb730f0089eb9603f7fd6201..251bc0e3b5ecd92acbb0244634ca46787bb0284d 100644 (file)
@@ -140,16 +140,14 @@ int drm_open(struct inode *inode, struct file *filp)
                spin_unlock(&dev->count_lock);
        }
 out:
-       if (!retcode) {
-               mutex_lock(&dev->struct_mutex);
-               if (minor->type == DRM_MINOR_LEGACY) {
-                       if (dev->dev_mapping == NULL)
-                               dev->dev_mapping = inode->i_mapping;
-                       else if (dev->dev_mapping != inode->i_mapping)
-                               retcode = -ENODEV;
-               }
-               mutex_unlock(&dev->struct_mutex);
+       mutex_lock(&dev->struct_mutex);
+       if (minor->type == DRM_MINOR_LEGACY) {
+               BUG_ON((dev->dev_mapping != NULL) &&
+                       (dev->dev_mapping != inode->i_mapping));
+               if (dev->dev_mapping == NULL)
+                       dev->dev_mapping = inode->i_mapping;
        }
+       mutex_unlock(&dev->struct_mutex);
 
        return retcode;
 }
index 16dce8479e7fad5d0406776f1a828065fff58004..9ecc907384ec339d312ca31c5ea26a2a0636aedf 100644 (file)
@@ -214,7 +214,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
        uint8_t ctl2;
 
        if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
-               if (ctl2 & TFP410_CTL_2_RSEN)
+               if (ctl2 & TFP410_CTL_2_HTPLG)
                        ret = connector_status_connected;
                else
                        ret = connector_status_disconnected;
index c3aca5c4de996405d5149a0070b8a8a9a860caca..eaa1893b6e9b977c3ae8a63c618fb9a1dd7ebd5b 100644 (file)
@@ -683,10 +683,8 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
-               if (ret != 0) {
-                       ret = -EFAULT;
+               if (ret != 0)
                        goto fail_free;
-               }
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -727,10 +725,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
-       if (ret != 0) {
-               ret = -EFAULT;
+       if (ret != 0)
                goto fail_batch_free;
-       }
 
        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -741,10 +737,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
-               if (ret != 0) {
-                       ret = -EFAULT;
+               if (ret != 0)
                        goto fail_clip_free;
-               }
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -1532,15 +1526,6 @@ int i915_driver_unload(struct drm_device *dev)
        }
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               /*
-                * free the memory space allocated for the child device
-                * config parsed from VBT
-                */
-               if (dev_priv->child_dev && dev_priv->child_dev_num) {
-                       kfree(dev_priv->child_dev);
-                       dev_priv->child_dev = NULL;
-                       dev_priv->child_dev_num = 0;
-               }
                drm_irq_uninstall(dev);
                vga_client_register(dev->pdev, NULL, NULL, NULL);
        }
index 544923988d138e1d55dfa25be708b85ae3fdf4be..7f436ec075f6221df8ee6beafebae603054be003 100644 (file)
@@ -192,7 +192,6 @@ int i965_reset(struct drm_device *dev, u8 flags)
                }
        } else {
                DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
-               mutex_unlock(&dev->struct_mutex);
                return -ENODEV;
        }
 
index 97163f7f2bad0fe33316c1bce3d397719b140a55..f5d49a7ac745e576dbf049409ec3f8d9a0c5ccb5 100644 (file)
@@ -258,7 +258,7 @@ typedef struct drm_i915_private {
 
        struct notifier_block lid_notifier;
 
-       int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
+       int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
        struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
        int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -555,8 +555,6 @@ typedef struct drm_i915_private {
        struct timer_list idle_timer;
        bool busy;
        u16 orig_clock;
-       int child_dev_num;
-       struct child_device_config *child_dev;
        struct drm_connector *int_lvds_connector;
 } drm_i915_private_t;
 
index 3ada62b8bd38190fc91b3e69bb6e2bcf3ae3a5c0..04da731a70abf78b4366c13d851ff79e6b908245 100644 (file)
@@ -1470,6 +1470,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
                obj_priv->dirty = 0;
 
        for (i = 0; i < page_count; i++) {
+               if (obj_priv->pages[i] == NULL)
+                       break;
+
                if (obj_priv->dirty)
                        set_page_dirty(obj_priv->pages[i]);
 
@@ -2243,6 +2246,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
        struct address_space *mapping;
        struct inode *inode;
        struct page *page;
+       int ret;
 
        if (obj_priv->pages_refcount++ != 0)
                return 0;
@@ -2262,13 +2266,14 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_cache_page_gfp(mapping, i,
-                                          GFP_HIGHUSER |
+                                          mapping_gfp_mask (mapping) |
                                           __GFP_COLD |
-                                          __GFP_RECLAIMABLE |
                                           gfpmask);
-               if (IS_ERR(page))
-                       goto err_pages;
-
+               if (IS_ERR(page)) {
+                       ret = PTR_ERR(page);
+                       i915_gem_object_put_pages(obj);
+                       return ret;
+               }
                obj_priv->pages[i] = page;
        }
 
@@ -2276,15 +2281,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
                i915_gem_object_do_bit_17_swizzle(obj);
 
        return 0;
-
-err_pages:
-       while (i--)
-               page_cache_release(obj_priv->pages[i]);
-
-       drm_free_large(obj_priv->pages);
-       obj_priv->pages = NULL;
-       obj_priv->pages_refcount--;
-       return PTR_ERR(page);
 }
 
 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -2335,12 +2331,6 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        pitch_val = obj_priv->stride / tile_width;
        pitch_val = ffs(pitch_val) - 1;
 
-       if (obj_priv->tiling_mode == I915_TILING_Y &&
-           HAS_128_BYTE_Y_TILING(dev))
-               WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
-       else
-               WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
-
        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2616,14 +2606,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                return -EINVAL;
        }
 
-       /* If the object is bigger than the entire aperture, reject it early
-        * before evicting everything in a vain attempt to find space.
-        */
-       if (obj->size > dev->gtt_total) {
-               DRM_ERROR("Attempting to bind an object larger than the aperture\n");
-               return -E2BIG;
-       }
-
  search_free:
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
@@ -3667,7 +3649,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                if (ret != 0) {
                        DRM_ERROR("copy %d cliprects failed: %d\n",
                                  args->num_cliprects, ret);
-                       ret = -EFAULT;
                        goto pre_mutex_err;
                }
        }
@@ -3949,17 +3930,6 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        int ret;
 
        i915_verify_inactive(dev, __FILE__, __LINE__);
-
-       if (obj_priv->gtt_space != NULL) {
-               if (alignment == 0)
-                       alignment = i915_gem_get_gtt_alignment(obj);
-               if (obj_priv->gtt_offset & (alignment - 1)) {
-                       ret = i915_gem_object_unbind(obj);
-                       if (ret)
-                               return ret;
-               }
-       }
-
        if (obj_priv->gtt_space == NULL) {
                ret = i915_gem_object_bind_to_gtt(obj, alignment);
                if (ret)
@@ -4699,16 +4669,6 @@ i915_gem_load(struct drm_device *dev)
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);
 
-       /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-       if (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
-               u32 tmp = I915_READ(MI_ARB_STATE);
-               if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
-                       /* arb state is a masked write, so set bit + bit in mask */
-                       tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
-                       I915_WRITE(MI_ARB_STATE, tmp);
-               }
-       }
-
        /* Old X drivers will take 0-2 for front, back, depth buffers */
        dev_priv->fence_reg_start = 3;
 
index fb2811c58affbdbb38421ee2c872ce02d78e545f..200e398453ca5305987a2d851a5361ba29d7bba2 100644 (file)
@@ -353,17 +353,21 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
                 * reg, so dont bother to check the size */
                if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
                        return false;
-       } else if (IS_I9XX(dev) || IS_I8XX(dev)) {
-               if (stride > 8192)
+       } else if (IS_I9XX(dev)) {
+               uint32_t pitch_val = ffs(stride / tile_width) - 1;
+
+               /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
+                * instead of 4 (2KB) on 945s.
+                */
+               if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
+                   size > (I830_FENCE_MAX_SIZE_VAL << 20))
                        return false;
+       } else {
+               uint32_t pitch_val = ffs(stride / tile_width) - 1;
 
-               if (IS_I9XX(dev)) {
-                       if (size > I830_FENCE_MAX_SIZE_VAL << 20)
-                               return false;
-               } else {
-                       if (size > I830_FENCE_MAX_SIZE_VAL << 19)
-                               return false;
-               }
+               if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
+                   size > (I830_FENCE_MAX_SIZE_VAL << 19))
+                       return false;
        }
 
        /* 965+ just needs multiples of tile width */
index 7214c852df99a188692a585a83753aea98774070..cc9b49ab1fd03f43400c4d9bf920bb4bf8aa8dd3 100644 (file)
 #define   I830_FENCE_SIZE_BITS(size)   ((ffs((size) >> 19) - 1) << 8)
 #define   I830_FENCE_PITCH_SHIFT       4
 #define   I830_FENCE_REG_VALID         (1<<0)
-#define   I915_FENCE_MAX_PITCH_VAL     4
+#define   I915_FENCE_MAX_PITCH_VAL     0x10
 #define   I830_FENCE_MAX_PITCH_VAL     6
 #define   I830_FENCE_MAX_SIZE_VAL      (1<<8)
 
 #define LM_BURST_LENGTH     0x00000700
 #define LM_FIFO_WATERMARK   0x0000001F
 #define MI_ARB_STATE   0x020e4 /* 915+ only */
-#define   MI_ARB_MASK_SHIFT      16    /* shift for enable bits */
-
-/* Make render/texture TLB fetches lower priorty than associated data
- *   fetches. This is not turned on by default
- */
-#define   MI_ARB_RENDER_TLB_LOW_PRIORITY       (1 << 15)
-
-/* Isoch request wait on GTT enable (Display A/B/C streams).
- * Make isoch requests stall on the TLB update. May cause
- * display underruns (test mode only)
- */
-#define   MI_ARB_ISOCH_WAIT_GTT                        (1 << 14)
-
-/* Block grant count for isoch requests when block count is
- * set to a finite value.
- */
-#define   MI_ARB_BLOCK_GRANT_MASK              (3 << 12)
-#define   MI_ARB_BLOCK_GRANT_8                 (0 << 12)       /* for 3 display planes */
-#define   MI_ARB_BLOCK_GRANT_4                 (1 << 12)       /* for 2 display planes */
-#define   MI_ARB_BLOCK_GRANT_2                 (2 << 12)       /* for 1 display plane */
-#define   MI_ARB_BLOCK_GRANT_0                 (3 << 12)       /* don't use */
-
-/* Enable render writes to complete in C2/C3/C4 power states.
- * If this isn't enabled, render writes are prevented in low
- * power states. That seems bad to me.
- */
-#define   MI_ARB_C3_LP_WRITE_ENABLE            (1 << 11)
-
-/* This acknowledges an async flip immediately instead
- * of waiting for 2TLB fetches.
- */
-#define   MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE      (1 << 10)
-
-/* Enables non-sequential data reads through arbiter
- */
-#define   MI_ARB_DUAL_DATA_PHASE_DISABLE               (1 << 9)
-
-/* Disable FSB snooping of cacheable write cycles from binner/render
- * command stream
- */
-#define   MI_ARB_CACHE_SNOOP_DISABLE           (1 << 8)
-
-/* Arbiter time slice for non-isoch streams */
-#define   MI_ARB_TIME_SLICE_MASK               (7 << 5)
-#define   MI_ARB_TIME_SLICE_1                  (0 << 5)
-#define   MI_ARB_TIME_SLICE_2                  (1 << 5)
-#define   MI_ARB_TIME_SLICE_4                  (2 << 5)
-#define   MI_ARB_TIME_SLICE_6                  (3 << 5)
-#define   MI_ARB_TIME_SLICE_8                  (4 << 5)
-#define   MI_ARB_TIME_SLICE_10                 (5 << 5)
-#define   MI_ARB_TIME_SLICE_14                 (6 << 5)
-#define   MI_ARB_TIME_SLICE_16                 (7 << 5)
-
-/* Low priority grace period page size */
-#define   MI_ARB_LOW_PRIORITY_GRACE_4KB                (0 << 4)        /* default */
-#define   MI_ARB_LOW_PRIORITY_GRACE_8KB                (1 << 4)
-
-/* Disable display A/B trickle feed */
-#define   MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE  (1 << 2)
-
-/* Set display plane priority */
-#define   MI_ARB_DISPLAY_PRIORITY_A_B          (0 << 0)        /* display A > display B */
-#define   MI_ARB_DISPLAY_PRIORITY_B_A          (1 << 0)        /* display B > display A */
-
 #define CACHE_MODE_0   0x02120 /* 915+ only */
 #define   CM0_MASK_SHIFT          16
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
index d4b5b18f2f115d56d9cc58fcebc99f8c150e64c7..96cd256e60e6fd117ed00cf0f54503cc5312fadf 100644 (file)
@@ -241,6 +241,10 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
                GPIOF,
        };
 
+       /* Set sensible defaults in case we can't find the general block
+          or it is the wrong chipset */
+       dev_priv->crt_ddc_bus = -1;
+
        general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
        if (general) {
                u16 block_size = get_blocksize(general);
@@ -362,70 +366,6 @@ parse_driver_features(struct drm_i915_private *dev_priv,
                dev_priv->render_reclock_avail = true;
 }
 
-static void
-parse_device_mapping(struct drm_i915_private *dev_priv,
-                      struct bdb_header *bdb)
-{
-       struct bdb_general_definitions *p_defs;
-       struct child_device_config *p_child, *child_dev_ptr;
-       int i, child_device_num, count;
-       u16     block_size;
-
-       p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
-       if (!p_defs) {
-               DRM_DEBUG_KMS("No general definition block is found\n");
-               return;
-       }
-       /* judge whether the size of child device meets the requirements.
-        * If the child device size obtained from general definition block
-        * is different with sizeof(struct child_device_config), skip the
-        * parsing of sdvo device info
-        */
-       if (p_defs->child_dev_size != sizeof(*p_child)) {
-               /* different child dev size . Ignore it */
-               DRM_DEBUG_KMS("different child size is found. Invalid.\n");
-               return;
-       }
-       /* get the block size of general definitions */
-       block_size = get_blocksize(p_defs);
-       /* get the number of child device */
-       child_device_num = (block_size - sizeof(*p_defs)) /
-                               sizeof(*p_child);
-       count = 0;
-       /* get the number of child device that is present */
-       for (i = 0; i < child_device_num; i++) {
-               p_child = &(p_defs->devices[i]);
-               if (!p_child->device_type) {
-                       /* skip the device block if device type is invalid */
-                       continue;
-               }
-               count++;
-       }
-       if (!count) {
-               DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
-               return;
-       }
-       dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
-       if (!dev_priv->child_dev) {
-               DRM_DEBUG_KMS("No memory space for child device\n");
-               return;
-       }
-
-       dev_priv->child_dev_num = count;
-       count = 0;
-       for (i = 0; i < child_device_num; i++) {
-               p_child = &(p_defs->devices[i]);
-               if (!p_child->device_type) {
-                       /* skip the device block if device type is invalid */
-                       continue;
-               }
-               child_dev_ptr = dev_priv->child_dev + count;
-               count++;
-               memcpy((void *)child_dev_ptr, (void *)p_child,
-                                       sizeof(*p_child));
-       }
-       return;
-}
 /**
  * intel_init_bios - initialize VBIOS settings & find VBT
  * @dev: DRM device
@@ -477,7 +417,6 @@ intel_init_bios(struct drm_device *dev)
        parse_lfp_panel_data(dev_priv, bdb);
        parse_sdvo_panel_data(dev_priv, bdb);
        parse_sdvo_device_mapping(dev_priv, bdb);
-       parse_device_mapping(dev_priv, bdb);
        parse_driver_features(dev_priv, bdb);
 
        pci_unmap_rom(pdev, bios);
index 425ac9d7f724792cce3e36f90122f3e644aceca3..0f8e5f69ac7a315eb8eb8c4b9f377a87cc8e5f6c 100644 (file)
@@ -549,21 +549,4 @@ bool intel_init_bios(struct drm_device *dev);
 #define   SWF14_APM_STANDBY    0x1
 #define   SWF14_APM_RESTORE    0x0
 
-/* Add the device class for LFP, TV, HDMI */
-#define         DEVICE_TYPE_INT_LFP    0x1022
-#define         DEVICE_TYPE_INT_TV     0x1009
-#define         DEVICE_TYPE_HDMI       0x60D2
-#define         DEVICE_TYPE_DP         0x68C6
-#define         DEVICE_TYPE_eDP        0x78C6
-
-/* define the DVO port for HDMI output type */
-#define                DVO_B           1
-#define                DVO_C           2
-#define                DVO_D           3
-
-/* define the PORT for DP output type */
-#define                PORT_IDPB       7
-#define                PORT_IDPC       8
-#define                PORT_IDPD       9
-
 #endif /* _I830_BIOS_H_ */
index 166a24e76b21e388ffaeca2fd2febac41a5ecd15..5e730e6f8a74942bd5afb20dd24affac9fdd7359 100644 (file)
@@ -557,7 +557,7 @@ void intel_crt_init(struct drm_device *dev)
        else {
                i2c_reg = GPIOA;
                /* Use VBT information for CRT DDC if available */
-               if (dev_priv->crt_ddc_bus != 0)
+               if (dev_priv->crt_ddc_bus != -1)
                        i2c_reg = dev_priv->crt_ddc_bus;
        }
        intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
index 79cc437af3b8f7879f7e60ed4cc842b524d62973..b00a1aaf0d71fc19e00a70887d37422647d90070 100644 (file)
@@ -785,8 +785,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
        intel_clock_t clock;
        int max_n;
        bool found;
-       /* approximately equals target * 0.00585 */
-       int err_most = (target >> 8) + (target >> 9);
+       /* approximately equals target * 0.00488 */
+       int err_most = (target >> 8) + (target >> 10);
        found = false;
 
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -1402,7 +1402,6 @@ static void igdng_enable_pll_edp (struct drm_crtc *crtc)
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
-       POSTING_READ(DP_A);
        udelay(200);
 }
 
@@ -1954,9 +1953,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
        int pipe = intel_crtc->pipe;
        bool enabled;
 
-       if (intel_crtc->dpms_mode == mode)
-               return;
-
        dev_priv->display.dpms(crtc, mode);
 
        intel_crtc->dpms_mode = mode;
@@ -4003,7 +3999,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        }
 
        intel_crtc->cursor_addr = 0;
-       intel_crtc->dpms_mode = -1;
+       intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
        intel_crtc->busy = false;
@@ -4326,7 +4322,7 @@ static void intel_init_display(struct drm_device *dev)
        }
 
        /* Returns the core display clock speed */
-       if (IS_I945G(dev) || (IS_G33(dev) && ! IS_IGDGM(dev)))
+       if (IS_I945G(dev))
                dev_priv->display.get_display_clock_speed =
                        i945_get_display_clock_speed;
        else if (IS_I915G(dev))
index d5b736104fd8b8286ea9290bf6241a4424aa6205..952bb4e2484d7acb100d6a4e8bc27a356f142931 100644 (file)
@@ -629,13 +629,6 @@ static const struct dmi_system_id bad_lid_status[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
                },
        },
-       {
-               .ident = "Clevo M5x0N",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
-                       DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
-               },
-       },
        { }
 };
 
@@ -648,12 +641,8 @@ static const struct dmi_system_id bad_lid_status[] = {
  */
 static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
 {
-       struct drm_device *dev = connector->dev;
        enum drm_connector_status status = connector_status_connected;
 
-       if (IS_I8XX(dev))
-               return connector_status_connected;
-
        if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
                status = connector_status_disconnected;
 
@@ -889,57 +878,68 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
                },
        },
-       {
-               .callback = intel_no_lvds_dmi_callback,
-               .ident = "Clientron U800",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
-               },
-       },
 
        { }     /* terminating entry */
 };
 
+#ifdef CONFIG_ACPI
 /*
- * Enumerate the child dev array parsed from VBT to check whether
- * the LVDS is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the LVDS is present.
- * Note: The addin_offset should also be checked for LVDS panel.
- * Only when it is non-zero, it is assumed that it is present.
+ * check_lid_device -- check whether @handle is an ACPI LID device.
+ * @handle: ACPI device handle
+ * @level : depth in the ACPI namespace tree
+ * @context: the number of LID device when we find the device
+ * @rv: a return value to fill if desired (Not use)
  */
-static int lvds_is_present_in_vbt(struct drm_device *dev)
+static acpi_status
+check_lid_device(acpi_handle handle, u32 level, void *context,
+                       void **return_value)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct child_device_config *p_child;
-       int i, ret;
+       struct acpi_device *acpi_dev;
+       int *lid_present = context;
+
+       acpi_dev = NULL;
+       /* Get the acpi device for device handle */
+       if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
+               /* If there is no ACPI device for handle, return */
+               return AE_OK;
+       }
 
-       if (!dev_priv->child_dev_num)
-               return 1;
+       if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
+               *lid_present = 1;
 
-       ret = 0;
-       for (i = 0; i < dev_priv->child_dev_num; i++) {
-               p_child = dev_priv->child_dev + i;
-               /*
-                * If the device type is not LFP, continue.
               * If the device type is 0x22, it is also regarded as LFP.
               */
-               if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
-                       p_child->device_type != DEVICE_TYPE_LFP)
-                       continue;
+       return AE_OK;
+}
+
+/**
+ * check whether there exists the ACPI LID device by enumerating the ACPI
* device tree.
+ */
+static int intel_lid_present(void)
+{
+       int lid_present = 0;
 
-               /* The addin_offset should be checked. Only when it is
-                * non-zero, it is regarded as present.
+       if (acpi_disabled) {
+               /* If ACPI is disabled, there is no ACPI device tree to
+                * check, so assume the LID device would have been present.
                 */
-               if (p_child->addin_offset) {
-                       ret = 1;
-                       break;
-               }
+               return 1;
        }
-       return ret;
+
+       acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+                               ACPI_UINT32_MAX,
+                               check_lid_device, &lid_present, NULL);
+
+       return lid_present;
 }
+#else
+static int intel_lid_present(void)
+{
+       /* In the absence of ACPI built in, assume that the LID device would
+        * have been present.
+        */
+       return 1;
+}
+#endif
 
 /**
  * intel_lvds_init - setup LVDS connectors on this device
@@ -964,10 +964,15 @@ void intel_lvds_init(struct drm_device *dev)
        if (dmi_check_system(intel_no_lvds))
                return;
 
-       if (!lvds_is_present_in_vbt(dev)) {
-               DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+       /* Assume that any device without an ACPI LID device also doesn't
+        * have an integrated LVDS.  We would be better off parsing the BIOS
+        * to get a reliable indicator, but that code isn't written yet.
+        *
+        * In the case of all-in-one desktops using LVDS that we've seen,
+        * they're using SDVO LVDS.
+        */
+       if (!intel_lid_present())
                return;
-       }
 
        if (IS_IGDNG(dev)) {
                if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
index 5d9c6a7a6ed55574e5c60ee1c7ad7e8150a0846a..3f5aaf14e6a3b5393c2334b0ce89e99e90a45119 100644 (file)
@@ -35,7 +35,6 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "intel_sdvo_regs.h"
-#include <linux/dmi.h>
 
 #undef SDVO_DEBUG
 
@@ -2290,25 +2289,6 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
                return 0x72;
 }
 
-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
-{
-       DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
-       return 1;
-}
-
-static struct dmi_system_id intel_sdvo_bad_tv[] = {
-       {
-               .callback = intel_sdvo_bad_tv_callback,
-               .ident = "IntelG45/ICH10R/DME1737",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
-               },
-       },
-
-       { }     /* terminating entry */
-};
-
 static bool
 intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
 {
@@ -2349,8 +2329,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
                                        (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
                                        (1 << INTEL_ANALOG_CLONE_BIT);
                }
-       } else if ((flags & SDVO_OUTPUT_SVID0) &&
-                  !dmi_check_system(intel_sdvo_bad_tv)) {
+       } else if (flags & SDVO_OUTPUT_SVID0) {
 
                sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
                encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
index 5b28b4e7ebe79ebfc7cc47608bd9501d9c0d0baf..ce026f002ea5f0674db27cba57570bee2d9f98b3 100644 (file)
@@ -1801,6 +1801,8 @@ intel_tv_init(struct drm_device *dev)
        drm_connector_attach_property(connector,
                                   dev->mode_config.tv_bottom_margin_property,
                                   tv_priv->margin[TV_MARGIN_BOTTOM]);
+
+       dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
 out:
        drm_sysfs_connector_add(connector);
 }
index ccf42c3dd1be66a246c4cef9fe0670e4239fb647..eb740fc3549f82c5530c5450f4862ca8cf764fa3 100644 (file)
@@ -368,8 +368,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                /* 2D, 3D, CUBE */
                switch (tmp) {
                case 0:
-               case 3:
-               case 4:
                case 5:
                case 6:
                case 7:
index d8c4f72eef8ed6eaaf9e3a53335162af0d8491da..2f43ee8e40480cc2895c663fec62751570a1332f 100644 (file)
@@ -346,12 +346,11 @@ void r300_gpu_init(struct radeon_device *rdev)
 
        r100_hdp_reset(rdev);
        /* FIXME: rv380 one pipes ? */
-       if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
-           (rdev->family == CHIP_R350)) {
+       if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
                /* r300,r350 */
                rdev->num_gb_pipes = 2;
        } else {
-               /* rv350,rv370,rv380,r300 AD */
+               /* rv350,rv370,rv380 */
                rdev->num_gb_pipes = 1;
        }
        rdev->num_z_pipes = 1;
index 731047301de4189541a1efb1396498199a8281a8..278f646bc18ef7dd0d027499f11b49d898b247fe 100644 (file)
@@ -1686,14 +1686,13 @@ int r600_init(struct radeon_device *rdev)
        if (rdev->accel_working) {
                r = radeon_ib_pool_init(rdev);
                if (r) {
-                       dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+                       DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+                       rdev->accel_working = false;
+               }
+               r = r600_ib_test(rdev);
+               if (r) {
+                       DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                        rdev->accel_working = false;
-               } else {
-                       r = r600_ib_test(rdev);
-                       if (r) {
-                               dev_err(rdev->dev, "IB test failed (%d).\n", r);
-                               rdev->accel_working = false;
-                       }
                }
        }
        return 0;
index 838b88c14f574e20adc0a18c5b73662c955461e5..0d820764f34074dc47b8164895a446b1510649af 100644 (file)
@@ -36,10 +36,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
 
-struct r600_cs_track {
-       u32     cb_color0_base_last;
-};
-
 /**
  * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
  * @parser:    parser structure holding parsing context.
@@ -180,28 +176,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
        return 0;
 }
 
-/**
- * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
- * @parser:            parser structure holding parsing context.
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
-{
-       struct radeon_cs_packet p3reloc;
-       int r;
-
-       r = r600_cs_packet_parse(p, &p3reloc, p->idx);
-       if (r) {
-               return 0;
-       }
-       if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
-               return 0;
-       }
-       return 1;
-}
-
 /**
  * r600_cs_packet_next_vline() - parse userspace VLINE packet
  * @parser:            parser structure holding parsing context.
@@ -363,7 +337,6 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                struct radeon_cs_packet *pkt)
 {
        struct radeon_cs_reloc *reloc;
-       struct r600_cs_track *track;
        volatile u32 *ib;
        unsigned idx;
        unsigned i;
@@ -371,7 +344,6 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
        int r;
        u32 idx_value;
 
-       track = (struct r600_cs_track *)p->track;
        ib = p->ib->ptr;
        idx = pkt->idx + 1;
        idx_value = radeon_get_ib_value(p, idx);
@@ -531,60 +503,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                for (i = 0; i < pkt->count; i++) {
                        reg = start_reg + (4 * i);
                        switch (reg) {
-                       /* This register were added late, there is userspace
-                        * which does provide relocation for those but set
-                        * 0 offset. In order to avoid breaking old userspace
-                        * we detect this and set address to point to last
-                        * CB_COLOR0_BASE, note that if userspace doesn't set
-                        * CB_COLOR0_BASE before this register we will report
-                        * error. Old userspace always set CB_COLOR0_BASE
-                        * before any of this.
-                        */
-                       case R_0280E0_CB_COLOR0_FRAG:
-                       case R_0280E4_CB_COLOR1_FRAG:
-                       case R_0280E8_CB_COLOR2_FRAG:
-                       case R_0280EC_CB_COLOR3_FRAG:
-                       case R_0280F0_CB_COLOR4_FRAG:
-                       case R_0280F4_CB_COLOR5_FRAG:
-                       case R_0280F8_CB_COLOR6_FRAG:
-                       case R_0280FC_CB_COLOR7_FRAG:
-                       case R_0280C0_CB_COLOR0_TILE:
-                       case R_0280C4_CB_COLOR1_TILE:
-                       case R_0280C8_CB_COLOR2_TILE:
-                       case R_0280CC_CB_COLOR3_TILE:
-                       case R_0280D0_CB_COLOR4_TILE:
-                       case R_0280D4_CB_COLOR5_TILE:
-                       case R_0280D8_CB_COLOR6_TILE:
-                       case R_0280DC_CB_COLOR7_TILE:
-                               if (!r600_cs_packet_next_is_pkt3_nop(p)) {
-                                       if (!track->cb_color0_base_last) {
-                                               dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
-                                               return -EINVAL;
-                                       }
-                                       ib[idx+1+i] = track->cb_color0_base_last;
-                                       printk_once(KERN_WARNING "You have old & broken userspace "
-                                               "please consider updating mesa & xf86-video-ati\n");
-                               } else {
-                                       r = r600_cs_packet_next_reloc(p, &reloc);
-                                       if (r) {
-                                               dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
-                                               return -EINVAL;
-                                       }
-                                       ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-                               }
-                               break;
                        case DB_DEPTH_BASE:
                        case DB_HTILE_DATA_BASE:
                        case CB_COLOR0_BASE:
-                               r = r600_cs_packet_next_reloc(p, &reloc);
-                               if (r) {
-                                       DRM_ERROR("bad SET_CONTEXT_REG "
-                                                       "0x%04X\n", reg);
-                                       return -EINVAL;
-                               }
-                               ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-                               track->cb_color0_base_last = ib[idx+1+i];
-                               break;
                        case CB_COLOR1_BASE:
                        case CB_COLOR2_BASE:
                        case CB_COLOR3_BASE:
@@ -757,11 +678,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 int r600_cs_parse(struct radeon_cs_parser *p)
 {
        struct radeon_cs_packet pkt;
-       struct r600_cs_track *track;
        int r;
 
-       track = kzalloc(sizeof(*track), GFP_KERNEL);
-       p->track = track;
        do {
                r = r600_cs_packet_parse(p, &pkt, p->idx);
                if (r) {
@@ -839,7 +757,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
-       parser.dev = &dev->pdev->dev;
        parser.rdev = NULL;
        parser.family = family;
        parser.ib = &fake_ib;
index 56fc6589132e9707e6bb5137c1184c8f6319a847..27ab428b149bbccc6c077e6a648c991ebaddcd2a 100644 (file)
 #define                S_000E60_SOFT_RESET_TSC(x)              (((x) & 1) << 16)
 #define                S_000E60_SOFT_RESET_VMC(x)              (((x) & 1) << 17)
 
-#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL          0x5480
-
-#define R_0280E0_CB_COLOR0_FRAG                      0x0280E0
-#define   S_0280E0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
-#define   G_0280E0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
-#define   C_0280E0_BASE_256B                           0x00000000
-#define R_0280E4_CB_COLOR1_FRAG                      0x0280E4
-#define R_0280E8_CB_COLOR2_FRAG                      0x0280E8
-#define R_0280EC_CB_COLOR3_FRAG                      0x0280EC
-#define R_0280F0_CB_COLOR4_FRAG                      0x0280F0
-#define R_0280F4_CB_COLOR5_FRAG                      0x0280F4
-#define R_0280F8_CB_COLOR6_FRAG                      0x0280F8
-#define R_0280FC_CB_COLOR7_FRAG                      0x0280FC
-#define R_0280C0_CB_COLOR0_TILE                      0x0280C0
-#define   S_0280C0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
-#define   G_0280C0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
-#define   C_0280C0_BASE_256B                           0x00000000
-#define R_0280C4_CB_COLOR1_TILE                      0x0280C4
-#define R_0280C8_CB_COLOR2_TILE                      0x0280C8
-#define R_0280CC_CB_COLOR3_TILE                      0x0280CC
-#define R_0280D0_CB_COLOR4_TILE                      0x0280D0
-#define R_0280D4_CB_COLOR5_TILE                      0x0280D4
-#define R_0280D8_CB_COLOR6_TILE                      0x0280D8
-#define R_0280DC_CB_COLOR7_TILE                      0x0280DC
-
-
 #endif
index 6735213892d564ad0e4182920f58c8f7496e7859..224506a2f7b1ae6d53d5b68347a4a7889a262a2c 100644 (file)
@@ -448,7 +448,6 @@ struct radeon_cs_chunk {
 };
 
 struct radeon_cs_parser {
-       struct device           *dev;
        struct radeon_device    *rdev;
        struct drm_file         *filp;
        /* chunks */
index e5e22b1cf502dab69b5cd4ea480b0b52db9987e9..969502acc29cbeb33d164da0b854e2b94e3ef9c8 100644 (file)
@@ -161,15 +161,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                }
        }
 
-       /* ASUS HD 3600 board lists the DVI port as HDMI */
-       if ((dev->pdev->device == 0x9598) &&
-           (dev->pdev->subsystem_vendor == 0x1043) &&
-           (dev->pdev->subsystem_device == 0x01e4)) {
-               if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
-                       *connector_type = DRM_MODE_CONNECTOR_DVII;
-               }
-       }
-
        /* ASUS HD 3450 board lists the DVI port as HDMI */
        if ((dev->pdev->device == 0x95C5) &&
            (dev->pdev->subsystem_vendor == 0x1043) &&
@@ -951,7 +942,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
                        le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
                lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
-                       le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+                       le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
                lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
                        le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
                lvds->panel_pwr_delay =
index b1dc1a112aef618da04053ca56cfe81c8e1fef27..29763ceae3af9de8dc2067ff38e0ac63d59fe75e 100644 (file)
@@ -140,14 +140,12 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
 {
        struct drm_device *dev = connector->dev;
        struct drm_connector *conflict;
-       struct radeon_connector *radeon_conflict;
        int i;
 
        list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
                if (conflict == connector)
                        continue;
 
-               radeon_conflict = to_radeon_connector(conflict);
                for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
                        if (conflict->encoder_ids[i] == 0)
                                break;
@@ -157,9 +155,6 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
                                if (conflict->status != connector_status_connected)
                                        continue;
 
-                               if (radeon_conflict->use_digital)
-                                       continue;
-
                                if (priority == true) {
                                        DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
                                        DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
@@ -286,7 +281,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
                radeon_encoder = to_radeon_encoder(encoder);
                if (!radeon_encoder->enc_priv)
                        return 0;
-               if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
+               if (rdev->is_atom_bios) {
                        struct radeon_encoder_atom_dac *dac_int;
                        dac_int = radeon_encoder->enc_priv;
                        dac_int->tv_std = val;
index c7236f4c6cdd23c7cf6f67fe68f25af477637fe6..4f7afc79dd82c6db776c55a656c38cd450bb575a 100644 (file)
@@ -417,9 +417,8 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
        return -EBUSY;
 }
 
-static void radeon_init_pipes(struct drm_device *dev)
+static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
 {
-       drm_radeon_private_t *dev_priv = dev->dev_private;
        uint32_t gb_tile_config, gb_pipe_sel = 0;
 
        if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
@@ -437,12 +436,11 @@ static void radeon_init_pipes(struct drm_device *dev)
                dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
        } else {
                /* R3xx */
-               if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
-                    dev->pdev->device != 0x4144) ||
+               if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
                    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
                        dev_priv->num_gb_pipes = 2;
                } else {
-                       /* RV3xx/R300 AD */
+                       /* R3Vxx */
                        dev_priv->num_gb_pipes = 1;
                }
        }
@@ -738,7 +736,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
 
        /* setup the raster pipes */
        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
-           radeon_init_pipes(dev);
+           radeon_init_pipes(dev_priv);
 
        /* Reset the CP ring */
        radeon_do_cp_reset(dev_priv);
@@ -1646,7 +1644,6 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
        radeon_cp_load_microcode(dev_priv);
        radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
 
-       dev_priv->have_z_offset = 0;
        radeon_do_engine_reset(dev);
        radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
 
index 20c52da0f62f9e46d19fda9fe2170e2b1b304981..5ab2cf96a26498a3d29aba7fe18ed6c46716b161 100644 (file)
@@ -230,7 +230,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
        parser.rdev = rdev;
-       parser.dev = rdev->dev;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
@@ -247,8 +246,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        }
        r = radeon_cs_parser_relocs(&parser);
        if (r) {
-               if (r != -ERESTARTSYS)
-                       DRM_ERROR("Failed to parse relocation %d!\n", r);
+               DRM_ERROR("Failed to parse relocation !\n");
                radeon_cs_parser_fini(&parser, r);
                mutex_unlock(&rdev->cs_mutex);
                return r;
index 6f683159fc64b537e2cd681ad9599f3972255801..c85df4afcb7ac09e2f89ec21f116a7ee4bdfaa39 100644 (file)
@@ -599,11 +599,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
        struct drm_gem_object *obj;
 
        obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
-       if (obj ==  NULL) {
-               dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
-                       "can't create framebuffer\n", mode_cmd->handle);
-               return NULL;
-       }
+
        return radeon_framebuffer_create(dev, mode_cmd, obj);
 }
 
index 76e4070388c62383553ec818a5e2902c95b4b000..350962e0f346f483b02d04d37f48efb3219c2ed0 100644 (file)
@@ -267,8 +267,6 @@ typedef struct drm_radeon_private {
 
        u32 scratch_ages[5];
 
-       int have_z_offset;
-
        /* starting from here on, data is preserved accross an open */
        uint32_t flags;         /* see radeon_chip_flags */
        resource_size_t fb_aper_offset;
index 4478b994b50013467c1d39e930934fe0c2665331..d42bc512d75a8cd1c6a8ae3e91443a67d8121f74 100644 (file)
@@ -1155,12 +1155,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        case ENCODER_OBJECT_ID_INTERNAL_DAC2:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
                atombios_dac_setup(encoder, ATOM_ENABLE);
-               if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
-                       if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-                               atombios_tv_setup(encoder, ATOM_ENABLE);
-                       else
-                               atombios_tv_setup(encoder, ATOM_DISABLE);
-               }
+               if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+                       atombios_tv_setup(encoder, ATOM_ENABLE);
                break;
        }
        atombios_apply_encoder_quirks(encoder, adjusted_mode);
index 7547ec6418bb5edd53608fdabd155848c302eec6..22ce4d6015e8f7700f50d443d64739287ec7426b 100644 (file)
@@ -261,7 +261,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
        if (!ref_div)
                return 1;
 
-       vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
+       vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
 
        /*
         * This is horribly crude: the VCO frequency range is divided into
index 183bef831afb5752b111b846baf89adf3edb0f4a..00382122869b9c94dfeeff68c1f6a4d5ec2ad821 100644 (file)
@@ -89,7 +89,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
                udelay(panel_pwr_delay * 1000);
                WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
                WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
-               udelay(panel_pwr_delay * 1000);
                break;
        }
 
index fc64a20b85830306b62a5c62521b2f8e87d67f89..3a12bb0c0563daa412aa07ea61bf6ab0368fc4d5 100644 (file)
 #define NTSC_TV_PLL_N_14 693
 #define NTSC_TV_PLL_P_14 7
 
-#define PAL_TV_PLL_M_14 19
-#define PAL_TV_PLL_N_14 353
-#define PAL_TV_PLL_P_14 5
-
 #define VERT_LEAD_IN_LINES 2
 #define FRAC_BITS 0xe
 #define FRAC_MASK 0x3fff
@@ -209,24 +205,9 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
                630627,             /* defRestart */
                347,                /* crtcPLL_N */
                14,                 /* crtcPLL_M */
-               8,                  /* crtcPLL_postDiv */
+                       8,                  /* crtcPLL_postDiv */
                1022,               /* pixToTV */
        },
-       { /* PAL timing for 14 Mhz ref clk */
-               800,                /* horResolution */
-               600,                /* verResolution */
-               TV_STD_PAL,         /* standard */
-               1131,               /* horTotal */
-               742,                /* verTotal */
-               813,                /* horStart */
-               840,                /* horSyncStart */
-               633,                /* verSyncStart */
-               708369,             /* defRestart */
-               211,                /* crtcPLL_N */
-               9,                  /* crtcPLL_M */
-               8,                  /* crtcPLL_postDiv */
-               759,                /* pixToTV */
-       },
 };
 
 #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
@@ -261,7 +242,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
                if (pll->reference_freq == 2700)
                        const_ptr = &available_tv_modes[1];
                else
-                       const_ptr = &available_tv_modes[3];
+                       const_ptr = &available_tv_modes[1]; /* FIX ME */
        }
        return const_ptr;
 }
@@ -704,9 +685,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
                        n = PAL_TV_PLL_N_27;
                        p = PAL_TV_PLL_P_27;
                } else {
-                       m = PAL_TV_PLL_M_14;
-                       n = PAL_TV_PLL_N_14;
-                       p = PAL_TV_PLL_P_14;
+                       m = PAL_TV_PLL_M_27;
+                       n = PAL_TV_PLL_N_27;
+                       p = PAL_TV_PLL_P_27;
                }
        }
 
index 474791076cf974c51c3c07939190e4ad08b0f87f..38537d971a3e3b5a666fe24b8f63ee901b1c121e 100644 (file)
@@ -101,7 +101,6 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
                        DRM_ERROR("Invalid depth buffer offset\n");
                        return -EINVAL;
                }
-               dev_priv->have_z_offset = 1;
                break;
 
        case RADEON_EMIT_PP_CNTL:
@@ -877,12 +876,6 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
                if (tmp & RADEON_BACK)
                        flags |= RADEON_FRONT;
        }
-       if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
-               if (!dev_priv->have_z_offset) {
-                       printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
-                       flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
-               }
-       }
 
        if (flags & (RADEON_FRONT | RADEON_BACK)) {
 
index 170029747dafebeff8698f978bf064b9d274c6fc..4444f48c496eb0d4f6bf88405a3700470874e527 100644 (file)
@@ -57,7 +57,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
        WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
 
        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
-       tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
+       tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
        WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
 
        tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
index c42403b0827cd3222f56fe0d1c4868d55454a90c..b0efd0ddae7a223d9ac2c7b323e9080a8f098b57 100644 (file)
@@ -1034,14 +1034,13 @@ int rv770_init(struct radeon_device *rdev)
        if (rdev->accel_working) {
                r = radeon_ib_pool_init(rdev);
                if (r) {
-                       dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+                       DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+                       rdev->accel_working = false;
+               }
+               r = r600_ib_test(rdev);
+               if (r) {
+                       DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                        rdev->accel_working = false;
-               } else {
-                       r = r600_ib_test(rdev);
-                       if (r) {
-                               dev_err(rdev->dev, "IB test failed (%d).\n", r);
-                               rdev->accel_working = false;
-                       }
                }
        }
        return 0;
index 8cb88e7e0302f11405ba776865fb720b31f9e2df..c70927ecda2179ea0b616f0818330d605e54350e 100644 (file)
@@ -330,7 +330,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        fbo->vm_node = NULL;
-       atomic_set(&fbo->cpu_writers, 0);
 
        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        if (fbo->mem.mm_node)
index 3d5b8b0705defb8a340649bdc80a28cca37f583f..7bcb89f39ce8912b79d565e8b60228e542a08301 100644 (file)
@@ -466,7 +466,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
        void *from_virtual;
        void *to_virtual;
        int i;
-       int ret = -ENOMEM;
+       int ret;
 
        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -485,10 +485,8 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = read_mapping_page(swap_space, i, NULL);
-               if (IS_ERR(from_page)) {
-                       ret = PTR_ERR(from_page);
+               if (IS_ERR(from_page))
                        goto out_err;
-               }
                to_page = __ttm_tt_get_page(ttm, i);
                if (unlikely(to_page == NULL))
                        goto out_err;
@@ -511,7 +509,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
        return 0;
 out_err:
        ttm_tt_free_alloced_pages(ttm);
-       return ret;
+       return -ENOMEM;
 }
 
 int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -523,7 +521,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
        void *from_virtual;
        void *to_virtual;
        int i;
-       int ret = -ENOMEM;
 
        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);
@@ -546,7 +543,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
                                                0);
                if (unlikely(IS_ERR(swap_storage))) {
                        printk(KERN_ERR "Failed allocating swap storage.\n");
-                       return PTR_ERR(swap_storage);
+                       return -ENOMEM;
                }
        } else
                swap_storage = persistant_swap_storage;
@@ -558,10 +555,9 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
                if (unlikely(from_page == NULL))
                        continue;
                to_page = read_mapping_page(swap_space, i, NULL);
-               if (unlikely(IS_ERR(to_page))) {
-                       ret = PTR_ERR(to_page);
+               if (unlikely(to_page == NULL))
                        goto out_err;
-               }
+
                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -585,5 +581,5 @@ out_err:
        if (!persistant_swap_storage)
                fput(swap_storage);
 
-       return ret;
+       return -ENOMEM;
 }
index aa8688d25e848457d81797d11d07700abc5bf0b0..1ac0c93603c903a1d7a39f1efb10530936a2f4e1 100644 (file)
@@ -954,7 +954,6 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
                }
 
        } else if (strncmp(curr_pos, "target ", 7) == 0) {
-               struct pci_bus *pbus;
                unsigned int domain, bus, devfn;
                struct vga_device *vgadev;
 
@@ -962,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
                remaining -= 7;
                pr_devel("client 0x%p called 'target'\n", priv);
                /* if target is default */
-               if (!strncmp(curr_pos, "default", 7))
+               if (!strncmp(buf, "default", 7))
                        pdev = pci_dev_get(vga_default_device());
                else {
                        if (!vga_pci_str_to_vars(curr_pos, remaining,
@@ -970,31 +969,18 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
                                ret_val = -EPROTO;
                                goto done;
                        }
-                       pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
-                               domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
-
-                       pbus = pci_find_bus(domain, bus);
-                       pr_devel("vgaarb: pbus %p\n", pbus);
-                       if (pbus == NULL) {
-                               pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
-                                       domain, bus);
-                               ret_val = -ENODEV;
-                               goto done;
-                       }
-                       pdev = pci_get_slot(pbus, devfn);
-                       pr_devel("vgaarb: pdev %p\n", pdev);
+
+                       pdev = pci_get_bus_and_slot(bus, devfn);
                        if (!pdev) {
-                               pr_err("vgaarb: invalid PCI address %x:%x\n",
-                                       bus, devfn);
+                               pr_info("vgaarb: invalid PCI address!\n");
                                ret_val = -ENODEV;
                                goto done;
                        }
                }
 
                vgadev = vgadev_find(pdev);
-               pr_devel("vgaarb: vgadev %p\n", vgadev);
                if (vgadev == NULL) {
-                       pr_err("vgaarb: this pci device is not a vga device\n");
+                       pr_info("vgaarb: this pci device is not a vga device\n");
                        pci_dev_put(pdev);
                        ret_val = -ENODEV;
                        goto done;
@@ -1012,8 +998,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
                        }
                }
                if (i == MAX_USER_CARDS) {
-                       pr_err("vgaarb: maximum user cards (%d) number reached!\n",
-                               MAX_USER_CARDS);
+                       pr_err("vgaarb: maximum user cards number reached!\n");
                        pci_dev_put(pdev);
                        /* XXX: which value to return? */
                        ret_val =  -ENOMEM;
index 15978635d34fadd7a23c7c4d3c08f3f191bddd78..96783542f7f56a34e47ca7f43d6ecbda094bd499 100644 (file)
@@ -1306,7 +1306,6 @@ static const struct hid_device_id hid_blacklist[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
@@ -1659,6 +1658,8 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
index 3975e039c3ddbedfb4e7246b3b00004ba8c70ddd..cab13e8c7d292c7ed93a06b8a58acc6072d50fd3 100644 (file)
@@ -53,13 +53,10 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 static int gyration_event(struct hid_device *hdev, struct hid_field *field,
                struct hid_usage *usage, __s32 value)
 {
-
-       if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
-               return 0;
+       struct input_dev *input = field->hidinput->input;
 
        if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
                        (usage->hid & 0xff) == 0x82) {
-               struct input_dev *input = field->hidinput->input;
                input_event(input, usage->type, usage->code, 1);
                input_sync(input);
                input_event(input, usage->type, usage->code, 0);
@@ -73,7 +70,6 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
 static const struct hid_device_id gyration_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, gyration_devices);
index 252853d4dbe3f84313112113f318df7c96578654..e380e7bee8c1c116d1c3db7beeb9f84a850f6ee3 100644 (file)
 #define USB_VENDOR_ID_GYRATION         0x0c16
 #define USB_DEVICE_ID_GYRATION_REMOTE  0x0002
 #define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
-#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
 
 #define USB_VENDOR_ID_HAPP             0x078b
 #define USB_DEVICE_ID_UGCI_DRIVING     0x0010
 #define USB_VENDOR_ID_NEC              0x073e
 #define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301
 
-#define USB_VENDOR_ID_NEXTWINDOW       0x1926
-#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN   0x0003
-
 #define USB_VENDOR_ID_NTRIG                0x1b96
 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN   0x0001
 
 #define USB_VENDOR_ID_SUNPLUS          0x04fc
 #define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8
 
+#define USB_VENDOR_ID_TENX             0x1130
+#define USB_DEVICE_ID_TENX_IBUDDY1     0x0001
+#define USB_DEVICE_ID_TENX_IBUDDY2     0x0002
+
 #define USB_VENDOR_ID_THRUSTMASTER     0x044f
 
 #define USB_VENDOR_ID_TOPMAX           0x0663
index 66579c0bf32821c7c418e2114fa524f9d896965f..cdd136942bcaa9e1435b1d2efc5444bf6f3317fc 100644 (file)
@@ -105,15 +105,11 @@ out:
 static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
 {
        unsigned int minor = iminor(file->f_path.dentry->d_inode);
-       struct hid_device *dev;
+       /* FIXME: What stops hidraw_table going NULL */
+       struct hid_device *dev = hidraw_table[minor]->hid;
        __u8 *buf;
        int ret = 0;
 
-       if (!hidraw_table[minor])
-               return -ENODEV;
-
-       dev = hidraw_table[minor]->hid;
-
        if (!dev->hid_output_raw_report)
                return -ENODEV;
 
@@ -241,16 +237,11 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
        struct inode *inode = file->f_path.dentry->d_inode;
        unsigned int minor = iminor(inode);
        long ret = 0;
-       struct hidraw *dev;
+       /* FIXME: What stops hidraw_table going NULL */
+       struct hidraw *dev = hidraw_table[minor];
        void __user *user_arg = (void __user*) arg;
 
        lock_kernel();
-       dev = hidraw_table[minor];
-       if (!dev) {
-               ret = -ENODEV;
-               goto out;
-       }
-
        switch (cmd) {
                case HIDIOCGRDESCSIZE:
                        if (put_user(dev->hid->rsize, (int __user *)arg))
@@ -323,7 +314,6 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
 
                ret = -ENOTTY;
        }
-out:
        unlock_kernel();
        return ret;
 }
index 968eaad17eac2d9ec55713f092d2eac7be19dcb1..92ff251d5017e1745aacece31f25eac9d28883fe 100755 (executable)
@@ -318,7 +318,6 @@ static int hid_submit_out(struct hid_device *hid)
                        err_hid("usb_submit_urb(out) failed");
                        return -1;
                }
-               usbhid->last_out = jiffies;
        } else {
                /*
                 * queue work to wake up the device.
@@ -380,7 +379,6 @@ static int hid_submit_ctrl(struct hid_device *hid)
                        err_hid("usb_submit_urb(ctrl) failed");
                        return -1;
                }
-               usbhid->last_ctrl = jiffies;
        } else {
                /*
                 * queue work to wake up the device.
@@ -516,20 +514,9 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
                usbhid->out[usbhid->outhead].report = report;
                usbhid->outhead = head;
 
-               if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
+               if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl))
                        if (hid_submit_out(hid))
                                clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
-               } else {
-                       /*
-                        * the queue is known to run
-                        * but an earlier request may be stuck
-                        * we may need to time out
-                        * no race because this is called under
-                        * spinlock
-                        */
-                       if (time_after(jiffies, usbhid->last_out + HZ * 5))
-                               usb_unlink_urb(usbhid->urbout);
-               }
                return;
        }
 
@@ -550,20 +537,9 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
        usbhid->ctrl[usbhid->ctrlhead].dir = dir;
        usbhid->ctrlhead = head;
 
-       if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
+       if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl))
                if (hid_submit_ctrl(hid))
                        clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
-       } else {
-               /*
-                * the queue is known to run
-                * but an earlier request may be stuck
-                * we may need to time out
-                * no race because this is called under
-                * spinlock
-                */
-               if (time_after(jiffies, usbhid->last_ctrl + HZ * 5))
-                       usb_unlink_urb(usbhid->urbctrl);
-       }
 }
 
 void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir)
@@ -1000,6 +976,16 @@ static int usbhid_start(struct hid_device *hid)
                }
        }
 
+       init_waitqueue_head(&usbhid->wait);
+       INIT_WORK(&usbhid->reset_work, hid_reset);
+       INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
+       setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
+
+       spin_lock_init(&usbhid->lock);
+
+       usbhid->intf = intf;
+       usbhid->ifnum = interface->desc.bInterfaceNumber;
+
        usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
        if (!usbhid->urbctrl) {
                ret = -ENOMEM;
@@ -1170,14 +1156,6 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
 
        hid->driver_data = usbhid;
        usbhid->hid = hid;
-       usbhid->intf = intf;
-       usbhid->ifnum = interface->desc.bInterfaceNumber;
-
-       init_waitqueue_head(&usbhid->wait);
-       INIT_WORK(&usbhid->reset_work, hid_reset);
-       INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
-       setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
-       spin_lock_init(&usbhid->lock);
 
        ret = hid_add_device(hid);
        if (ret) {
index 64c5dee271061d37aa5232ab9ffc84eae312ecdf..5713b93e76cbdfdcdfce904b1dbe18763562f890 100644 (file)
@@ -37,7 +37,6 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
-       { USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN, HID_QUIRK_MULTI_INPUT},
        { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
 
index ec20400c7f2904bf793ac82b37cac65df03ac161..08f505ca2e3db47426a2940fb2876a2e89ba9e14 100644 (file)
@@ -80,14 +80,12 @@ struct usbhid_device {
        unsigned char ctrlhead, ctrltail;                               /* Control fifo head & tail */
        char *ctrlbuf;                                                  /* Control buffer */
        dma_addr_t ctrlbuf_dma;                                         /* Control buffer dma */
-       unsigned long last_ctrl;                                                /* record of last output for timeouts */
 
        struct urb *urbout;                                             /* Output URB */
        struct hid_output_fifo out[HID_CONTROL_FIFO_SIZE];              /* Output pipe fifo */
        unsigned char outhead, outtail;                                 /* Output pipe fifo head & tail */
        char *outbuf;                                                   /* Output buffer */
        dma_addr_t outbuf_dma;                                          /* Output buffer dma */
-       unsigned long last_out;                                                 /* record of last output for timeouts */
 
        spinlock_t lock;                                                /* fifo spinlock */
        unsigned long iofl;                                             /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
index 2ad62c339cd2c9cf849dc3a551a8d47548ef2190..6c9ace1b76f6db6ca810c5cf2c7cb89889da7984 100644 (file)
@@ -213,7 +213,7 @@ int __init ams_init(void)
        return -ENODEV;
 }
 
-void ams_sensor_detach(void)
+void ams_exit(void)
 {
        /* Remove input device */
        ams_input_exit();
@@ -221,6 +221,9 @@ void ams_sensor_detach(void)
        /* Remove attributes */
        device_remove_file(&ams_info.of_dev->dev, &dev_attr_current);
 
+       /* Shut down implementation */
+       ams_info.exit();
+
        /* Flush interrupt worker
         *
         * We do this after ams_info.exit(), because an interrupt might
@@ -236,12 +239,6 @@ void ams_sensor_detach(void)
        pmf_unregister_irq_client(&ams_freefall_client);
 }
 
-static void __exit ams_exit(void)
-{
-       /* Shut down implementation */
-       ams_info.exit();
-}
-
 MODULE_AUTHOR("Stelian Pop, Michael Hanselmann");
 MODULE_DESCRIPTION("Apple Motion Sensor driver");
 MODULE_LICENSE("GPL");
index abeecd27b484fe2345f3c01dda91b7f74f079cd6..2cbf8a6506c7e8fad72736773efa238367bf03f5 100644 (file)
@@ -238,8 +238,6 @@ static int ams_i2c_probe(struct i2c_client *client,
 static int ams_i2c_remove(struct i2c_client *client)
 {
        if (ams_info.has_device) {
-               ams_sensor_detach();
-
                /* Disable interrupts */
                ams_i2c_set_irq(AMS_IRQ_ALL, 0);
 
index 4f61b3ee1b08b4ca0ee65f018becb0ef7128892e..fb18b3d3162bcebfe908fa5c678ccf6893fdb25d 100644 (file)
@@ -133,8 +133,6 @@ static void ams_pmu_get_xyz(s8 *x, s8 *y, s8 *z)
 
 static void ams_pmu_exit(void)
 {
-       ams_sensor_detach();
-
        /* Disable interrupts */
        ams_pmu_set_irq(AMS_IRQ_ALL, 0);
 
index b28d7e27a031727bc7cc83c18d28b9fc15b2160a..5ed387b0bd9aa08185e05bd790ac06976a8a7bed 100644 (file)
@@ -61,7 +61,6 @@ extern struct ams ams_info;
 
 extern void ams_sensors(s8 *x, s8 *y, s8 *z);
 extern int ams_sensor_attach(void);
-extern void ams_sensor_detach(void);
 
 extern int ams_pmu_init(struct device_node *np);
 extern int ams_i2c_init(struct device_node *np);
index 585219167fa754ac72f9e0a96c15d278cc36fc67..2d7bceeed0bc3daf10a8ae2d58d106d575107eb2 100644 (file)
@@ -53,7 +53,6 @@ struct coretemp_data {
        struct mutex update_lock;
        const char *name;
        u32 id;
-       u16 core_id;
        char valid;             /* zero until following fields are valid */
        unsigned long last_updated;     /* in jiffies */
        int temp;
@@ -76,7 +75,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute
        if (attr->index == SHOW_NAME)
                ret = sprintf(buf, "%s\n", data->name);
        else    /* show label */
-               ret = sprintf(buf, "Core %d\n", data->core_id);
+               ret = sprintf(buf, "Core %d\n", data->id);
        return ret;
 }
 
@@ -229,7 +228,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
                if (err) {
                        dev_warn(dev,
                                 "Unable to access MSR 0xEE, for Tjmax, left"
-                                " at default\n");
+                                " at default");
                } else if (eax & 0x40000000) {
                        tjmax = tjmax_ee;
                }
@@ -256,9 +255,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
        }
 
        data->id = pdev->id;
-#ifdef CONFIG_SMP
-       data->core_id = c->cpu_core_id;
-#endif
        data->name = "coretemp";
        mutex_init(&data->update_lock);
 
@@ -356,10 +352,6 @@ struct pdev_entry {
        struct list_head list;
        struct platform_device *pdev;
        unsigned int cpu;
-#ifdef CONFIG_SMP
-       u16 phys_proc_id;
-       u16 cpu_core_id;
-#endif
 };
 
 static LIST_HEAD(pdev_list);
@@ -370,22 +362,6 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
        int err;
        struct platform_device *pdev;
        struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
-
-       mutex_lock(&pdev_list_mutex);
-
-#ifdef CONFIG_SMP
-       /* Skip second HT entry of each core */
-       list_for_each_entry(pdev_entry, &pdev_list, list) {
-               if (c->phys_proc_id == pdev_entry->phys_proc_id &&
-                   c->cpu_core_id == pdev_entry->cpu_core_id) {
-                       err = 0;        /* Not an error */
-                       goto exit;
-               }
-       }
-#endif
 
        pdev = platform_device_alloc(DRVNAME, cpu);
        if (!pdev) {
@@ -409,10 +385,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
 
        pdev_entry->pdev = pdev;
        pdev_entry->cpu = cpu;
-#ifdef CONFIG_SMP
-       pdev_entry->phys_proc_id = c->phys_proc_id;
-       pdev_entry->cpu_core_id = c->cpu_core_id;
-#endif
+       mutex_lock(&pdev_list_mutex);
        list_add_tail(&pdev_entry->list, &pdev_list);
        mutex_unlock(&pdev_list_mutex);
 
@@ -423,7 +396,6 @@ exit_device_free:
 exit_device_put:
        platform_device_put(pdev);
 exit:
-       mutex_unlock(&pdev_list_mutex);
        return err;
 }
 
index afebc3439881cbec36aa917538ea9fe1dde4bc52..e2107e533ede3371aeff3e4213bac1e06c624dfb 100644 (file)
@@ -79,7 +79,7 @@ I2C_CLIENT_INSMOD_2(f75373, f75375);
 #define F75375_REG_PWM2_DROP_DUTY      0x6C
 
 #define FAN_CTRL_LINEAR(nr)            (4 + nr)
-#define FAN_CTRL_MODE(nr)              (4 + ((nr) * 2))
+#define FAN_CTRL_MODE(nr)              (5 + ((nr) * 2))
 
 /*
  * Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
                return -EINVAL;
 
        fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
-       fanmode &= ~(3 << FAN_CTRL_MODE(nr));
+       fanmode = ~(3 << FAN_CTRL_MODE(nr));
 
        switch (val) {
        case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        conf = f75375_read8(client, F75375_REG_CONFIG1);
-       conf &= ~(1 << FAN_CTRL_LINEAR(nr));
+       conf = ~(1 << FAN_CTRL_LINEAR(nr));
 
        if (val == 0)
                conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
index f16d60f0bf47581ae2380bcb986f7b37aa7dea1b..be475e844c2a73b146314bf3209f487c02110f59 100644 (file)
@@ -324,8 +324,8 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
        lis3lv02d_joystick_disable();
        lis3lv02d_poweroff(&lis3_dev);
 
-       led_classdev_unregister(&hpled_led.led_classdev);
        flush_work(&hpled_led.work);
+       led_classdev_unregister(&hpled_led.led_classdev);
 
        return lis3lv02d_remove_fs(&lis3_dev);
 }
index 497476f637a98b3ef8bb9d3107edf6b0b1e0c240..a3749cb0f181ec623a4d628ea615c66a154a6c5e 100644 (file)
@@ -80,13 +80,6 @@ superio_inb(int reg)
        return inb(VAL);
 }
 
-static inline void
-superio_outb(int reg, int val)
-{
-       outb(reg, REG);
-       outb(val, VAL);
-}
-
 static int superio_inw(int reg)
 {
        int val;
@@ -1043,21 +1036,6 @@ static int __init it87_find(unsigned short *address,
                        sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
 
                reg = superio_inb(IT87_SIO_PINX2_REG);
-               /*
-                * The IT8720F has no VIN7 pin, so VCCH should always be
-                * routed internally to VIN7 with an internal divider.
-                * Curiously, there still is a configuration bit to control
-                * this, which means it can be set incorrectly. And even
-                * more curiously, many boards out there are improperly
-                * configured, even though the IT8720F datasheet claims
-                * that the internal routing of VCCH to VIN7 is the default
-                * setting. So we force the internal routing in this case.
-                */
-               if (sio_data->type == it8720 && !(reg & (1 << 1))) {
-                       reg |= (1 << 1);
-                       superio_outb(IT87_SIO_PINX2_REG, reg);
-                       pr_notice("it87: Routing internal VCCH to in7\n");
-               }
                if (reg & (1 << 0))
                        pr_info("it87: in3 is VCC (+5V)\n");
                if (reg & (1 << 1))
index 4f84d1a76d52ae10d76b7e2af85e16359df2f98d..1fe99511184111a68a38e151e96e03c9491f59ad 100644 (file)
@@ -120,7 +120,7 @@ static ssize_t show_temp(struct device *dev,
        int temp;
        struct k8temp_data *data = k8temp_update_device(dev);
 
-       if (data->swap_core_select && (data->sensorsp & SEL_CORE))
+       if (data->swap_core_select)
                core = core ? 0 : 1;
 
        temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
@@ -143,37 +143,6 @@ static struct pci_device_id k8temp_ids[] = {
 
 MODULE_DEVICE_TABLE(pci, k8temp_ids);
 
-static int __devinit is_rev_g_desktop(u8 model)
-{
-       u32 brandidx;
-
-       if (model < 0x69)
-               return 0;
-
-       if (model == 0xc1 || model == 0x6c || model == 0x7c)
-               return 0;
-
-       /*
-        * Differentiate between AM2 and ASB1.
-        * See "Constructing the processor Name String" in "Revision
-        * Guide for AMD NPT Family 0Fh Processors" (33610).
-        */
-       brandidx = cpuid_ebx(0x80000001);
-       brandidx = (brandidx >> 9) & 0x1f;
-
-       /* Single core */
-       if ((model == 0x6f || model == 0x7f) &&
-           (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
-               return 0;
-
-       /* Dual core */
-       if (model == 0x6b &&
-           (brandidx == 0xb || brandidx == 0xc))
-               return 0;
-
-       return 1;
-}
-
 static int __devinit k8temp_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
@@ -210,12 +179,12 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
                                 "wrong - check erratum #141\n");
                }
 
-               if (is_rev_g_desktop(model)) {
+               if ((model >= 0x69) &&
+                   !(model == 0xc1 || model == 0x6c || model == 0x7c)) {
                        /*
-                        * RevG desktop CPUs (i.e. no socket S1G1 or
-                        * ASB1 parts) need additional offset,
-                        * otherwise reported temperature is below
-                        * ambient temperature
+                        * RevG desktop CPUs (i.e. no socket S1G1 parts)
+                        * need additional offset, otherwise reported
+                        * temperature is below ambient temperature
                         */
                        data->temp_offset = 21000;
                }
index 5d5ed69851dbe9c8cc495ce198a1bef23ab741a0..cf5afb9a10abbd9af3432dce36249a23edfcab0d 100644 (file)
@@ -127,14 +127,12 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
 
        /*
         * Common configuration
-        * BDU: (12 bits sensors only) LSB and MSB values are not updated until
-        *      both have been read. So the value read will always be correct.
+        * BDU: LSB and MSB values are not updated until both have been read.
+        *      So the value read will always be correct.
         */
-       if (lis3->whoami == LIS_DOUBLE_ID) {
-               lis3->read(lis3, CTRL_REG2, &reg);
-               reg |= CTRL2_BDU;
-               lis3->write(lis3, CTRL_REG2, reg);
-       }
+       lis3->read(lis3, CTRL_REG2, &reg);
+       reg |= CTRL2_BDU;
+       lis3->write(lis3, CTRL_REG2, reg);
 }
 EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
 
@@ -363,8 +361,7 @@ static ssize_t lis3lv02d_calibrate_store(struct device *dev,
 }
 
 /* conversion btw sampling rate and the register values */
-static int lis3_12_rates[4] = {40, 160, 640, 2560};
-static int lis3_8_rates[2] = {100, 400};
+static int lis3lv02dl_df_val[4] = {40, 160, 640, 2560};
 static ssize_t lis3lv02d_rate_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
@@ -372,13 +369,8 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
        int val;
 
        lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
-
-       if (lis3_dev.whoami == LIS_DOUBLE_ID)
-               val = lis3_12_rates[(ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4];
-       else
-               val = lis3_8_rates[(ctrl & CTRL1_DR) >> 7];
-
-       return sprintf(buf, "%d\n", val);
+       val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
+       return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
 }
 
 static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
index 7cdd76f31ce32ba00f10314bb4016bed6bdaf26c..3e1ff46f72d3ac2cdfde3f3bf1b83637513c42b8 100644 (file)
@@ -103,7 +103,6 @@ enum lis3lv02d_ctrl1 {
        CTRL1_DF1       = 0x20,
        CTRL1_PD0       = 0x40,
        CTRL1_PD1       = 0x80,
-       CTRL1_DR        = 0x80, /* Data rate on 8 bits */
 };
 enum lis3lv02d_ctrl2 {
        CTRL2_DAS       = 0x01,
index b0d03640af4b55ab14a610b90788c7b0dfafb693..6c53d987de1088dc5f4a199b6a0c44236b220ac4 100644 (file)
@@ -1286,7 +1286,6 @@ static int lm85_probe(struct i2c_client *client,
        switch (data->type) {
        case adm1027:
        case adt7463:
-       case adt7468:
        case emc6d100:
        case emc6d102:
                data->freq_map = adm1027_freq_map;
index 21d201befc2cdb7aeebb11c495f5ba3c10e74215..65c232a9d0c5a917b533e2aeac34c2c13aa4fba5 100644 (file)
@@ -45,7 +45,9 @@ enum ltc4245_cmd {
        LTC4245_VEEIN                   = 0x19,
        LTC4245_VEESENSE                = 0x1a,
        LTC4245_VEEOUT                  = 0x1b,
-       LTC4245_GPIOADC                 = 0x1c,
+       LTC4245_GPIOADC1                = 0x1c,
+       LTC4245_GPIOADC2                = 0x1d,
+       LTC4245_GPIOADC3                = 0x1e,
 };
 
 struct ltc4245_data {
@@ -59,7 +61,7 @@ struct ltc4245_data {
        u8 cregs[0x08];
 
        /* Voltage registers */
-       u8 vregs[0x0d];
+       u8 vregs[0x0f];
 };
 
 static struct ltc4245_data *ltc4245_update_device(struct device *dev)
@@ -84,7 +86,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
                                data->cregs[i] = val;
                }
 
-               /* Read voltage registers -- 0x10 to 0x1c */
+               /* Read voltage registers -- 0x10 to 0x1f */
                for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
                        val = i2c_smbus_read_byte_data(client, i+0x10);
                        if (unlikely(val < 0))
@@ -126,7 +128,9 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg)
        case LTC4245_VEEOUT:
                voltage = regval * -55;
                break;
-       case LTC4245_GPIOADC:
+       case LTC4245_GPIOADC1:
+       case LTC4245_GPIOADC2:
+       case LTC4245_GPIOADC3:
                voltage = regval * 10;
                break;
        default:
@@ -293,7 +297,9 @@ LTC4245_ALARM(in7_min_alarm,        (1 << 2),       LTC4245_FAULT2);
 LTC4245_ALARM(in8_min_alarm,   (1 << 3),       LTC4245_FAULT2);
 
 /* GPIO voltages */
-LTC4245_VOLTAGE(in9_input,                     LTC4245_GPIOADC);
+LTC4245_VOLTAGE(in9_input,                     LTC4245_GPIOADC1);
+LTC4245_VOLTAGE(in10_input,                    LTC4245_GPIOADC2);
+LTC4245_VOLTAGE(in11_input,                    LTC4245_GPIOADC3);
 
 /* Power Consumption (virtual) */
 LTC4245_POWER(power1_input,                    LTC4245_12VSENSE);
@@ -336,6 +342,8 @@ static struct attribute *ltc4245_attributes[] = {
        &sensor_dev_attr_in8_min_alarm.dev_attr.attr,
 
        &sensor_dev_attr_in9_input.dev_attr.attr,
+       &sensor_dev_attr_in10_input.dev_attr.attr,
+       &sensor_dev_attr_in11_input.dev_attr.attr,
 
        &sensor_dev_attr_power1_input.dev_attr.attr,
        &sensor_dev_attr_power2_input.dev_attr.attr,
index 68e69a49633cc2ac5cbbf9572b829c8bc6aadd76..4a64b85d4ec955d383932fe25d5edb726981a650 100644 (file)
@@ -1610,8 +1610,11 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
 
 static int __init pc87360_device_add(unsigned short address)
 {
-       struct resource res[3];
-       int err, i, res_count;
+       struct resource res = {
+               .name   = "pc87360",
+               .flags  = IORESOURCE_IO,
+       };
+       int err, i;
 
        pdev = platform_device_alloc("pc87360", address);
        if (!pdev) {
@@ -1620,28 +1623,22 @@ static int __init pc87360_device_add(unsigned short address)
                goto exit;
        }
 
-       memset(res, 0, 3 * sizeof(struct resource));
-       res_count = 0;
        for (i = 0; i < 3; i++) {
                if (!extra_isa[i])
                        continue;
-               res[res_count].start = extra_isa[i];
-               res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
-               res[res_count].name = "pc87360",
-               res[res_count].flags = IORESOURCE_IO,
+               res.start = extra_isa[i];
+               res.end = extra_isa[i] + PC87360_EXTENT - 1;
 
-               err = acpi_check_resource_conflict(&res[res_count]);
+               err = acpi_check_resource_conflict(&res);
                if (err)
                        goto exit_device_put;
 
-               res_count++;
-       }
-
-       err = platform_device_add_resources(pdev, res, res_count);
-       if (err) {
-               printk(KERN_ERR "pc87360: Device resources addition failed "
-                      "(%d)\n", err);
-               goto exit_device_put;
+               err = platform_device_add_resources(pdev, &res, 1);
+               if (err) {
+                       printk(KERN_ERR "pc87360: Device resource[%d] "
+                              "addition failed (%d)\n", i, err);
+                       goto exit_device_put;
+               }
        }
 
        err = platform_device_add(pdev);
index fbc997ee67d973283c37181cd38dc499159639b0..864a371f6eb98443f0565b1d1263cb66a370e152 100644 (file)
@@ -302,13 +302,13 @@ error_ret:
  **/
 static inline int sht15_calc_temp(struct sht15_data *data)
 {
-       int d1 = temppoints[0].d1;
+       int d1 = 0;
        int i;
 
-       for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
+       for (i = 1; i < ARRAY_SIZE(temppoints); i++)
                /* Find pointer to interpolate */
                if (data->supply_uV > temppoints[i - 1].vdd) {
-                       d1 = (data->supply_uV - temppoints[i - 1].vdd)
+                       d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
                                * (temppoints[i].d1 - temppoints[i - 1].d1)
                                / (temppoints[i].vdd - temppoints[i - 1].vdd)
                                + temppoints[i - 1].d1;
@@ -541,12 +541,7 @@ static int __devinit sht15_probe(struct platform_device *pdev)
 /* If a regulator is available, query what the supply voltage actually is!*/
        data->reg = regulator_get(data->dev, "vcc");
        if (!IS_ERR(data->reg)) {
-               int voltage;
-
-               voltage = regulator_get_voltage(data->reg);
-               if (voltage)
-                       data->supply_uV = voltage;
-
+               data->supply_uV = regulator_get_voltage(data->reg);
                regulator_enable(data->reg);
                /* setup a notifier block to update this if another device
                 *  causes the voltage to change */
index d3a786b36d6ac961e1464a95c7e26289fdf7a540..20924343431b65f61048ca2fc05c5b1d93afbcd7 100644 (file)
@@ -62,9 +62,9 @@ static const u8 TMP421_TEMP_LSB[4]            = { 0x10, 0x11, 0x12, 0x13 };
 #define TMP423_DEVICE_ID                       0x23
 
 static const struct i2c_device_id tmp421_id[] = {
-       { "tmp421", 2 },
-       { "tmp422", 3 },
-       { "tmp423", 4 },
+       { "tmp421", tmp421 },
+       { "tmp422", tmp422 },
+       { "tmp423", tmp423 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, tmp421_id);
@@ -74,23 +74,21 @@ struct tmp421_data {
        struct mutex update_lock;
        char valid;
        unsigned long last_updated;
-       int channels;
+       int kind;
        u8 config;
        s16 temp[4];
 };
 
 static int temp_from_s16(s16 reg)
 {
-       /* Mask out status bits */
-       int temp = reg & ~0xf;
+       int temp = reg;
 
        return (temp * 1000 + 128) / 256;
 }
 
 static int temp_from_u16(u16 reg)
 {
-       /* Mask out status bits */
-       int temp = reg & ~0xf;
+       int temp = reg;
 
        /* Add offset for extended temperature range. */
        temp -= 64 * 256;
@@ -110,7 +108,7 @@ static struct tmp421_data *tmp421_update_device(struct device *dev)
                data->config = i2c_smbus_read_byte_data(client,
                        TMP421_CONFIG_REG_1);
 
-               for (i = 0; i < data->channels; i++) {
+               for (i = 0; i <= data->kind; i++) {
                        data->temp[i] = i2c_smbus_read_byte_data(client,
                                TMP421_TEMP_MSB[i]) << 8;
                        data->temp[i] |= i2c_smbus_read_byte_data(client,
@@ -169,7 +167,7 @@ static mode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
        devattr = container_of(a, struct device_attribute, attr);
        index = to_sensor_dev_attr(devattr)->index;
 
-       if (index < data->channels)
+       if (data->kind > index)
                return a->mode;
 
        return 0;
@@ -277,7 +275,7 @@ static int tmp421_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
-       data->channels = id->driver_data;
+       data->kind = id->driver_data;
 
        err = tmp421_init_client(client);
        if (err)
index 806f033695b5da1eb1e2a584bd1a950a425d3f5b..55edcfe5b851abe5676d1d3c857eed93c233e458 100644 (file)
@@ -41,8 +41,7 @@
   Tolapai               0x5032     32     hard     yes     yes     yes
   ICH10                 0x3a30     32     hard     yes     yes     yes
   ICH10                 0x3a60     32     hard     yes     yes     yes
-  3400/5 Series (PCH)   0x3b30     32     hard     yes     yes     yes
-  Cougar Point (PCH)    0x1c22     32     hard     yes     yes     yes
+  PCH                   0x3b30     32     hard     yes     yes     yes
 
   Features supported by this driver:
   Software PEC                     no
@@ -416,11 +415,9 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
                data->block[0] = 32;    /* max for SMBus block reads */
        }
 
-       /* Experience has shown that the block buffer can only be used for
-          SMBus (not I2C) block transactions, even though the datasheet
-          doesn't mention this limitation. */
        if ((i801_features & FEATURE_BLOCK_BUFFER)
-        && command != I2C_SMBUS_I2C_BLOCK_DATA
+        && !(command == I2C_SMBUS_I2C_BLOCK_DATA
+             && read_write == I2C_SMBUS_READ)
         && i801_set_block_buffer_mode() == 0)
                result = i801_block_transaction_by_block(data, read_write,
                                                         hwpec);
@@ -581,7 +578,6 @@ static struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
        { 0, }
 };
 
@@ -711,7 +707,6 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
        case PCI_DEVICE_ID_INTEL_ICH10_4:
        case PCI_DEVICE_ID_INTEL_ICH10_5:
        case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
-       case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
                i801_features |= FEATURE_I2C_BLOCK_READ;
                /* fall through */
        case PCI_DEVICE_ID_INTEL_82801DB_3:
index 62a5ce527aad3ee5b0e738fb853a415321d55d94..f7346a9bd95f21134fcdfd20508bc3317f75bdbf 100644 (file)
@@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg)
 
 static int pca_isa_waitforcompletion(void *pd)
 {
+       long ret = ~0;
        unsigned long timeout;
-       long ret;
 
        if (irq > -1) {
                ret = wait_event_timeout(pca_wait,
@@ -81,15 +81,11 @@ static int pca_isa_waitforcompletion(void *pd)
        } else {
                /* Do polling */
                timeout = jiffies + pca_isa_ops.timeout;
-               do {
-                       ret = time_before(jiffies, timeout);
-                       if (pca_isa_readbyte(pd, I2C_PCA_CON)
-                                       & I2C_PCA_CON_SI)
-                               break;
+               while (((pca_isa_readbyte(pd, I2C_PCA_CON)
+                               & I2C_PCA_CON_SI) == 0)
+                               && (ret = time_before(jiffies, timeout)))
                        udelay(100);
-               } while (ret);
        }
-
        return ret > 0;
 }
 
index fd295dd949861ecd6c024e836cc405ba3d7625f7..5b2213df5ed039dfb008c0cdf3aa6d2019543630 100644 (file)
@@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val)
 static int i2c_pca_pf_waitforcompletion(void *pd)
 {
        struct i2c_pca_pf_data *i2c = pd;
+       long ret = ~0;
        unsigned long timeout;
-       long ret;
 
        if (i2c->irq) {
                ret = wait_event_timeout(i2c->wait,
@@ -90,13 +90,10 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
        } else {
                /* Do polling */
                timeout = jiffies + i2c->adap.timeout;
-               do {
-                       ret = time_before(jiffies, timeout);
-                       if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
-                                       & I2C_PCA_CON_SI)
-                               break;
+               while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
+                               & I2C_PCA_CON_SI) == 0)
+                               && (ret = time_before(jiffies, timeout)))
                        udelay(100);
-               } while (ret);
        }
 
        return ret > 0;
@@ -224,7 +221,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
 
        if (irq) {
                ret = request_irq(irq, i2c_pca_pf_handler,
-                       IRQF_TRIGGER_FALLING, pdev->name, i2c);
+                       IRQF_TRIGGER_FALLING, i2c->adap.name, i2c);
                if (ret)
                        goto e_reqirq;
        }
index 4b728449fa6c6dae147eec21e5865aeec5386343..639be6969b60a7d7e61185b0443b5e258dd1fcf2 100755 (executable)
@@ -1478,24 +1478,14 @@ static int i2c_detect_address(struct i2c_client *temp_client, int kind,
 
        /* Make sure there is something at this address, unless forced */
        if (kind < 0) {
-               if (addr == 0x73 && (adapter->class & I2C_CLASS_HWMON)) {
-                       /* Special probe for FSC hwmon chips */
-                       union i2c_smbus_data dummy;
-
-                       if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, 0,
-                                          I2C_SMBUS_BYTE_DATA, &dummy) < 0)
-                               return 0;
-               } else {
-                       if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
-                                          I2C_SMBUS_QUICK, NULL) < 0)
-                               return 0;
-
-                       /* Prevent 24RF08 corruption */
-                       if ((addr & ~0x0f) == 0x50)
-                               i2c_smbus_xfer(adapter, addr, 0,
-                                              I2C_SMBUS_WRITE, 0,
-                                              I2C_SMBUS_QUICK, NULL);
-               }
+               if (i2c_smbus_xfer(adapter, addr, 0, 0, 0,
+                                  I2C_SMBUS_QUICK, NULL) < 0)
+                       return 0;
+
+               /* prevent 24RF08 corruption */
+               if ((addr & ~0x0f) == 0x50)
+                       i2c_smbus_xfer(adapter, addr, 0, 0, 0,
+                                      I2C_SMBUS_QUICK, NULL);
        }
 
        /* Finally call the custom detection function */
index a9c331301237734a3c927f9c1a80e739b78f44ea..1a32d62ed86b6bb611b8cd625a790587ef6b5735 100644 (file)
@@ -632,10 +632,12 @@ static void cmd640_init_dev(ide_drive_t *drive)
 
 static int cmd640_test_irq(ide_hwif_t *hwif)
 {
+       struct pci_dev *dev     = to_pci_dev(hwif->dev);
        int irq_reg             = hwif->channel ? ARTTIM23 : CFR;
-       u8  irq_mask            = hwif->channel ? ARTTIM23_IDE23INTR :
+       u8  irq_stat, irq_mask  = hwif->channel ? ARTTIM23_IDE23INTR :
                                                  CFR_IDE01INTR;
-       u8  irq_stat            = get_cmd640_reg(irq_reg);
+
+       pci_read_config_byte(dev, irq_reg, &irq_stat);
 
        return (irq_stat & irq_mask) ? 1 : 0;
 }
index 2de76cc08f61d335a816d752c0bf3d9f955ee1bc..64207df8da82fe5627664777518617bd0facc990 100644 (file)
@@ -506,22 +506,15 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
        return (flags & REQ_FAILED) ? -EIO : 0;
 }
 
-/*
- * returns true if rq has been completed
- */
-static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
+static void ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
 {
        unsigned int nr_bytes = cmd->nbytes - cmd->nleft;
 
        if (cmd->tf_flags & IDE_TFLAG_WRITE)
                nr_bytes -= cmd->last_xfer_len;
 
-       if (nr_bytes > 0) {
+       if (nr_bytes > 0)
                ide_complete_rq(drive, 0, nr_bytes);
-               return true;
-       }
-
-       return false;
 }
 
 static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
@@ -686,8 +679,7 @@ out_end:
                }
 
                if (uptodate == 0 && rq->bio)
-                       if (ide_cd_error_cmd(drive, cmd))
-                               return ide_stopped;
+                       ide_cd_error_cmd(drive, cmd);
 
                /* make sure it's fully ended */
                if (blk_fs_request(rq) == 0) {
index 67fb73559fd582e0bbd477500636c315834b7764..cc8633cbe133db4d9b7f81330ef66a547a6176ac 100644 (file)
@@ -428,11 +428,13 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
 {
        struct request *rq;
        int error;
-       int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
 
-       rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
+       if (cmd->tf_flags & IDE_TFLAG_WRITE)
+               rq->cmd_flags |= REQ_RW;
+
        /*
         * (ks) We transfer currently only whole sectors.
         * This is suffient for now.  But, it would be great,
index 675fc042bc6003d2bcf291bddc63f579e53a0f4c..66b41351910ad390d5ec70dbb18c5ab99a196f45 100644 (file)
@@ -486,8 +486,7 @@ static int send_connect(struct iwch_ep *ep)
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
-       opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
-              V_CONG_CONTROL_FLAVOR(cong_flavor);
+       opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
        skb->priority = CPL_PRIORITY_SETUP;
        set_arp_failure_handler(skb, act_open_req_arp_failure);
 
@@ -1304,8 +1303,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
-       opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
-              V_CONG_CONTROL_FLAVOR(cong_flavor);
+       opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
 
        rpl = cplhdr(skb);
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
index f8302c26774387179da91ca9790bbeaba1e43882..30bdf427ee6d099b907f13ffe0d05dfa9d53c39a 100644 (file)
@@ -752,8 +752,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                if (++priv->tx_outstanding == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
-                       if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
-                               ipoib_warn(priv, "request notify on send CQ failed\n");
                        netif_stop_queue(dev);
                }
        }
index b4b22576f12a0a0aba0c99f421fcdf7cd2699016..df3eb8c9fd96adf7e0b9cc8496bfce760ad6952d 100644 (file)
@@ -1163,7 +1163,7 @@ static ssize_t create_child(struct device *dev,
 
        return ret ? ret : count;
 }
-static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
+static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
 
 static ssize_t delete_child(struct device *dev,
                            struct device_attribute *attr,
@@ -1183,7 +1183,7 @@ static ssize_t delete_child(struct device *dev,
        return ret ? ret : count;
 
 }
-static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
+static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
 
 int ipoib_add_pkey_attr(struct net_device *dev)
 {
index 274c883ef3eaf30afd478408bd1f10c001baf0f8..b9453d068e9d81922568daa82f733649c5b623f2 100644 (file)
@@ -209,8 +209,6 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
        mem_copy->copy_buf = NULL;
 }
 
-#define IS_4K_ALIGNED(addr)    ((((unsigned long)addr) & ~MASK_4K) == 0)
-
 /**
  * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
  * and returns the length of resulting physical address array (may be less than
@@ -223,52 +221,62 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
  * where --few fragments of the same page-- are present in the SG as
  * consecutive elements. Also, it handles one entry SG.
  */
-
 static int iser_sg_to_page_vec(struct iser_data_buf *data,
                               struct iser_page_vec *page_vec,
                               struct ib_device *ibdev)
 {
-       struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
-       u64 start_addr, end_addr, page, chunk_start = 0;
+       struct scatterlist *sgl = (struct scatterlist *)data->buf;
+       struct scatterlist *sg;
+       u64 first_addr, last_addr, page;
+       int end_aligned;
+       unsigned int cur_page = 0;
        unsigned long total_sz = 0;
-       unsigned int dma_len;
-       int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
+       int i;
 
        /* compute the offset of first element */
        page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
 
-       new_chunk = 1;
-       cur_page  = 0;
        for_each_sg(sgl, sg, data->dma_nents, i) {
-               start_addr = ib_sg_dma_address(ibdev, sg);
-               if (new_chunk)
-                       chunk_start = start_addr;
-               dma_len = ib_sg_dma_len(ibdev, sg);
-               end_addr = start_addr + dma_len;
+               unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+
                total_sz += dma_len;
 
-               /* collect page fragments until aligned or end of SG list */
-               if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
-                       new_chunk = 0;
-                       continue;
+               first_addr = ib_sg_dma_address(ibdev, sg);
+               last_addr  = first_addr + dma_len;
+
+               end_aligned   = !(last_addr  & ~MASK_4K);
+
+               /* continue to collect page fragments till aligned or SG ends */
+               while (!end_aligned && (i + 1 < data->dma_nents)) {
+                       sg = sg_next(sg);
+                       i++;
+                       dma_len = ib_sg_dma_len(ibdev, sg);
+                       total_sz += dma_len;
+                       last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
+                       end_aligned = !(last_addr  & ~MASK_4K);
                }
-               new_chunk = 1;
-
-               /* address of the first page in the contiguous chunk;
-                  masking relevant for the very first SG entry,
-                  which might be unaligned */
-               page = chunk_start & MASK_4K;
-               do {
-                       page_vec->pages[cur_page++] = page;
+
+               /* handle the 1st page in the 1st DMA element */
+               if (cur_page == 0) {
+                       page = first_addr & MASK_4K;
+                       page_vec->pages[cur_page] = page;
+                       cur_page++;
                        page += SIZE_4K;
-               } while (page < end_addr);
-       }
+               } else
+                       page = first_addr;
 
+               for (; page < last_addr; page += SIZE_4K) {
+                       page_vec->pages[cur_page] = page;
+                       cur_page++;
+               }
+
+       }
        page_vec->data_size = total_sz;
        iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
        return cur_page;
 }
 
+#define IS_4K_ALIGNED(addr)    ((((unsigned long)addr) & ~MASK_4K) == 0)
 
 /**
  * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
@@ -276,40 +284,42 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
  * the number of entries which are aligned correctly. Supports the case where
  * consecutive SG elements are actually fragments of the same physcial page.
  */
-static int iser_data_buf_aligned_len(struct iser_data_buf *data,
-                                     struct ib_device *ibdev)
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
+                                             struct ib_device *ibdev)
 {
-       struct scatterlist *sgl, *sg, *next_sg = NULL;
-       u64 start_addr, end_addr;
-       int i, ret_len, start_check = 0;
-
-       if (data->dma_nents == 1)
-               return 1;
+       struct scatterlist *sgl, *sg;
+       u64 end_addr, next_addr;
+       int i, cnt;
+       unsigned int ret_len = 0;
 
        sgl = (struct scatterlist *)data->buf;
-       start_addr  = ib_sg_dma_address(ibdev, sgl);
 
+       cnt = 0;
        for_each_sg(sgl, sg, data->dma_nents, i) {
-               if (start_check && !IS_4K_ALIGNED(start_addr))
-                       break;
-
-               next_sg = sg_next(sg);
-               if (!next_sg)
-                       break;
-
-               end_addr    = start_addr + ib_sg_dma_len(ibdev, sg);
-               start_addr  = ib_sg_dma_address(ibdev, next_sg);
-
-               if (end_addr == start_addr) {
-                       start_check = 0;
-                       continue;
-               } else
-                       start_check = 1;
-
-               if (!IS_4K_ALIGNED(end_addr))
-                       break;
+               /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
+                  "offset: %ld sz: %ld\n", i,
+                  (unsigned long)sg_phys(sg),
+                  (unsigned long)sg->offset,
+                  (unsigned long)sg->length); */
+               end_addr = ib_sg_dma_address(ibdev, sg) +
+                          ib_sg_dma_len(ibdev, sg);
+               /* iser_dbg("Checking sg iobuf end address "
+                      "0x%08lX\n", end_addr); */
+               if (i + 1 < data->dma_nents) {
+                       next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
+                       /* are i, i+1 fragments of the same page? */
+                       if (end_addr == next_addr) {
+                               cnt++;
+                               continue;
+                       } else if (!IS_4K_ALIGNED(end_addr)) {
+                               ret_len = cnt + 1;
+                               break;
+                       }
+               }
+               cnt++;
        }
-       ret_len = (next_sg) ? i : i+1;
+       if (i == data->dma_nents)
+               ret_len = cnt;  /* loop ended */
        iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
                 ret_len, data->dma_nents, data);
        return ret_len;
index c82ae82cc43f88d92cd67ce50fde833af23aa88a..2266ecbfbc010789390c9a5ebb36b72d0960526e 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/smp_lock.h>
-#include "input-compat.h"
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
 MODULE_DESCRIPTION("Input core");
@@ -759,40 +758,6 @@ static int input_attach_handler(struct input_dev *dev, struct input_handler *han
        return error;
 }
 
-#ifdef CONFIG_COMPAT
-
-static int input_bits_to_string(char *buf, int buf_size,
-                               unsigned long bits, bool skip_empty)
-{
-       int len = 0;
-
-       if (INPUT_COMPAT_TEST) {
-               u32 dword = bits >> 32;
-               if (dword || !skip_empty)
-                       len += snprintf(buf, buf_size, "%x ", dword);
-
-               dword = bits & 0xffffffffUL;
-               if (dword || !skip_empty || len)
-                       len += snprintf(buf + len, max(buf_size - len, 0),
-                                       "%x", dword);
-       } else {
-               if (bits || !skip_empty)
-                       len += snprintf(buf, buf_size, "%lx", bits);
-       }
-
-       return len;
-}
-
-#else /* !CONFIG_COMPAT */
-
-static int input_bits_to_string(char *buf, int buf_size,
-                               unsigned long bits, bool skip_empty)
-{
-       return bits || !skip_empty ?
-               snprintf(buf, buf_size, "%lx", bits) : 0;
-}
-
-#endif
 
 #ifdef CONFIG_PROC_FS
 
@@ -861,25 +826,14 @@ static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
                                   unsigned long *bitmap, int max)
 {
        int i;
-       bool skip_empty = true;
-       char buf[18];
 
-       seq_printf(seq, "B: %s=", name);
-
-       for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
-               if (input_bits_to_string(buf, sizeof(buf),
-                                        bitmap[i], skip_empty)) {
-                       skip_empty = false;
-                       seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
-               }
-       }
-
-       /*
-        * If no output was produced print a single 0.
-        */
-       if (skip_empty)
-               seq_puts(seq, "0");
+       for (i = BITS_TO_LONGS(max) - 1; i > 0; i--)
+               if (bitmap[i])
+                       break;
 
+       seq_printf(seq, "B: %s=", name);
+       for (; i >= 0; i--)
+               seq_printf(seq, "%lx%s", bitmap[i], i > 0 ? " " : "");
        seq_putc(seq, '\n');
 }
 
@@ -1168,23 +1122,14 @@ static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
 {
        int i;
        int len = 0;
-       bool skip_empty = true;
-
-       for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
-               len += input_bits_to_string(buf + len, max(buf_size - len, 0),
-                                           bitmap[i], skip_empty);
-               if (len) {
-                       skip_empty = false;
-                       if (i > 0)
-                               len += snprintf(buf + len, max(buf_size - len, 0), " ");
-               }
-       }
 
-       /*
-        * If no output was produced print a single 0.
-        */
-       if (len == 0)
-               len = snprintf(buf, buf_size, "%d", 0);
+       for (i = BITS_TO_LONGS(max) - 1; i > 0; i--)
+               if (bitmap[i])
+                       break;
+
+       for (; i >= 0; i--)
+               len += snprintf(buf + len, max(buf_size - len, 0),
+                               "%lx%s", bitmap[i], i > 0 ? " " : "");
 
        if (add_cr)
                len += snprintf(buf + len, max(buf_size - len, 0), "\n");
@@ -1199,8 +1144,7 @@ static ssize_t input_dev_show_cap_##bm(struct device *dev,                \
 {                                                                      \
        struct input_dev *input_dev = to_input_dev(dev);                \
        int len = input_print_bitmap(buf, PAGE_SIZE,                    \
-                                    input_dev->bm##bit, ev##_MAX,      \
-                                    true);                             \
+                                    input_dev->bm##bit, ev##_MAX, 1);  \
        return min_t(int, len, PAGE_SIZE);                              \
 }                                                                      \
 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
@@ -1264,7 +1208,7 @@ static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
 
        len = input_print_bitmap(&env->buf[env->buflen - 1],
                                 sizeof(env->buf) - env->buflen,
-                                bitmap, max, false);
+                                bitmap, max, 0);
        if (len >= (sizeof(env->buf) - env->buflen))
                return -ENOMEM;
 
index 93c60e0f2b8e6a2432f06aec1ea81da0fdd6af70..b1bd6dd322864d0bd04c2f9a48c9f80b3217dcce 100644 (file)
@@ -481,9 +481,6 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
 
        memcpy(joydev->abspam, abspam, len);
 
-       for (i = 0; i < joydev->nabs; i++)
-               joydev->absmap[joydev->abspam[i]] = i;
-
  out:
        kfree(abspam);
        return retval;
index 2cfbc1752605c05241326df2c553fd745d34ab8a..9a2977c216967d3c7a23daca744e8f136663c213 100644 (file)
  */
 #define TWL4030_MAX_ROWS       8       /* TWL4030 hard limit */
 #define TWL4030_MAX_COLS       8
-/*
- * Note that we add space for an extra column so that we can handle
- * row lines connected to the gnd (see twl4030_col_xlate()).
- */
-#define TWL4030_ROW_SHIFT      4
-#define TWL4030_KEYMAP_SIZE    (TWL4030_MAX_ROWS << TWL4030_ROW_SHIFT)
+#define TWL4030_ROW_SHIFT      3
+#define TWL4030_KEYMAP_SIZE    (TWL4030_MAX_ROWS * TWL4030_MAX_COLS)
 
 struct twl4030_keypad {
        unsigned short  keymap[TWL4030_KEYMAP_SIZE];
@@ -185,7 +181,7 @@ static int twl4030_read_kp_matrix_state(struct twl4030_keypad *kp, u16 *state)
        return ret;
 }
 
-static bool twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
+static int twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
 {
        int i;
        u16 check = 0;
@@ -194,12 +190,12 @@ static bool twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
                u16 col = key_state[i];
 
                if ((col & check) && hweight16(col) > 1)
-                       return true;
+                       return 1;
 
                check |= col;
        }
 
-       return false;
+       return 0;
 }
 
 static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
@@ -228,8 +224,7 @@ static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
                if (!changed)
                        continue;
 
-               /* Extra column handles "all gnd" rows */
-               for (col = 0; col < kp->n_cols + 1; col++) {
+               for (col = 0; col < kp->n_cols; col++) {
                        int code;
 
                        if (!(changed & (1 << col)))
index 0c99db075e8f140d7670e6ad1d09c20e556a723c..fc8823bcf20c162f29c993851d2f8c1705bb8d9e 100644 (file)
@@ -62,8 +62,6 @@ static const struct alps_model_info alps_model_data[] = {
        { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
                ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
        { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FW_BK_1 },               /* Dell Vostro 1400 */
-       { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
-               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },      /* Toshiba Tecra A11-11L */
 };
 
 /*
index 9451e28701f8d414721d881d91d83c2941a24cea..0876d82cecfc2a99737d24e13cf40945b795bbeb 100644 (file)
@@ -1349,7 +1349,6 @@ static int psmouse_reconnect(struct serio *serio)
        struct psmouse *psmouse = serio_get_drvdata(serio);
        struct psmouse *parent = NULL;
        struct serio_driver *drv = serio->drv;
-       unsigned char type;
        int rc = -1;
 
        if (!drv || !psmouse) {
@@ -1369,15 +1368,10 @@ static int psmouse_reconnect(struct serio *serio)
        if (psmouse->reconnect) {
                if (psmouse->reconnect(psmouse))
                        goto out;
-       } else {
-               psmouse_reset(psmouse);
-
-               if (psmouse_probe(psmouse) < 0)
-                       goto out;
-
-               type = psmouse_extensions(psmouse, psmouse_max_proto, false);
-               if (psmouse->type != type)
-                       goto out;
+       } else if (psmouse_probe(psmouse) < 0 ||
+                  psmouse->type != psmouse_extensions(psmouse,
+                                               psmouse_max_proto, false)) {
+               goto out;
        }
 
        /* ok, the device type (and capabilities) match the old one,
index 21ef4b59a8183d24abb7916b84420b38d8d4562d..2a5982e532f81aea8ec28fca40403c2f68150723 100644 (file)
@@ -165,13 +165,6 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
                },
        },
-       {
-               /* Gigabyte Spring Peak - defines wrong chassis type */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
-               },
-       },
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -328,13 +321,6 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
                },
        },
-       {
-               /* Sony Vaio VPCZ122GX */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "VPCZ122GX"),
-               },
-       },
        {
                /* Sony Vaio FS-115b */
                .matches = {
@@ -455,13 +441,6 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
                },
        },
-       {
-               /* Medion Akoya E1222 */
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
-               },
-       },
        {
                /* Mivvy M310 */
                .matches = {
index 16f5ab2ee034be2a24b087e175ba4ea4e1d6bb17..1df02d25aca5e4843cc6b02988f93dc0c05f2803 100644 (file)
@@ -1412,8 +1412,8 @@ static int __init i8042_init(void)
 
 static void __exit i8042_exit(void)
 {
-       platform_device_unregister(i8042_platform_device);
        platform_driver_unregister(&i8042_driver);
+       platform_device_unregister(i8042_platform_device);
        i8042_platform_exit();
 
        panic_blink = NULL;
index e6307ba452ea5439f08b963db4e4363159dae1ee..9114ae1c7488a477aaaad4b7e983890d5b3ef2a0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * drivers/input/tablet/wacom.h
  *
- *  USB Wacom tablet support
+ *  USB Wacom Graphire and Wacom Intuos tablet support
  *
  *  Copyright (c) 2000-2004 Vojtech Pavlik     <vojtech@ucw.cz>
  *  Copyright (c) 2000 Andreas Bach Aaen       <abach@stofanet.dk>
@@ -69,7 +69,6 @@
  *      v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A)
  *      v1.50 (pc) - Fixed a TabletPC touch bug in 2.6.28
  *      v1.51 (pc) - Added support for Intuos4
- *      v1.52 (pc) - Query Wacom data upon system resume
  */
 
 /*
@@ -90,9 +89,9 @@
 /*
  * Version Information
  */
-#define DRIVER_VERSION "v1.52"
+#define DRIVER_VERSION "v1.51"
 #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
-#define DRIVER_DESC "USB Wacom tablet driver"
+#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver"
 #define DRIVER_LICENSE "GPL"
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
index 69fc4b8d8738e60f68634aad297247060d791e03..ea30c983a33efdbd6469cbd2fbc07fe308ade8fe 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * drivers/input/tablet/wacom_sys.c
  *
- *  USB Wacom tablet support - system specific code
+ *  USB Wacom Graphire and Wacom Intuos tablet support - system specific code
  */
 
 /*
@@ -562,15 +562,10 @@ static int wacom_resume(struct usb_interface *intf)
        int rv;
 
        mutex_lock(&wacom->lock);
-
-       /* switch to wacom mode first */
-       wacom_query_tablet_data(intf);
-
        if (wacom->open)
                rv = usb_submit_urb(wacom->irq, GFP_NOIO);
        else
                rv = 0;
-
        mutex_unlock(&wacom->lock);
 
        return rv;
index a0f7b99aee8be4b8d3aab15c03f7af6ecfb5005e..cc768caa38f54b86f48ec92310ed93e3b1fcf998 100644 (file)
@@ -1243,10 +1243,14 @@ static void do_action(int action, struct cardstate *cs,
                 * note that bcs may be NULL if no B channel is free
                 */
                at_state2->ConState = 700;
-               for (i = 0; i < STR_NUM; ++i) {
-                       kfree(at_state2->str_var[i]);
-                       at_state2->str_var[i] = NULL;
-               }
+               kfree(at_state2->str_var[STR_NMBR]);
+               at_state2->str_var[STR_NMBR] = NULL;
+               kfree(at_state2->str_var[STR_ZCPN]);
+               at_state2->str_var[STR_ZCPN] = NULL;
+               kfree(at_state2->str_var[STR_ZBC]);
+               at_state2->str_var[STR_ZBC] = NULL;
+               kfree(at_state2->str_var[STR_ZHLC]);
+               at_state2->str_var[STR_ZHLC] = NULL;
                at_state2->int_var[VAR_ZCTP] = -1;
 
                spin_lock_irqsave(&cs->lock, flags);
index b3065b8b24568674ae7b5c64504f6d9a5e3ca360..6a8e1384e7bd09b6441ef6a27eaed5c497f31061 100644 (file)
@@ -635,6 +635,7 @@ void gigaset_if_receive(struct cardstate *cs,
        if ((tty = cs->tty) == NULL)
                gig_dbg(DEBUG_ANY, "receive on closed device");
        else {
+               tty_buffer_request_room(tty, len);
                tty_insert_flip_string(tty, buffer, len);
                tty_flip_buffer_push(tty);
        }
index 2655e3aab8952603fc0e4a50f3e8879525d01b52..1081091bbfaf74e95285c02bc8bf85ab075e5018 100644 (file)
@@ -174,7 +174,7 @@ int sc_ioctl(int card, scs_ioctl *data)
                pr_debug("%s: SCIOGETSPID: ioctl received\n",
                                sc_adapter[card]->devicename);
 
-               spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
+               spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
                if (!spid) {
                        kfree(rcvmsg);
                        return -ENOMEM;
@@ -194,7 +194,7 @@ int sc_ioctl(int card, scs_ioctl *data)
                        kfree(rcvmsg);
                        return status;
                }
-               strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE);
+               strcpy(spid, rcvmsg->msg_data.byte_array);
 
                /*
                 * Package the switch type and send to user space
@@ -272,12 +272,12 @@ int sc_ioctl(int card, scs_ioctl *data)
                        return status;
                }
 
-               dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL);
+               dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL);
                if (!dn) {
                        kfree(rcvmsg);
                        return -ENOMEM;
                }
-               strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE);
+               strcpy(dn, rcvmsg->msg_data.byte_array);
                kfree(rcvmsg);
 
                /*
@@ -348,7 +348,7 @@ int sc_ioctl(int card, scs_ioctl *data)
                pr_debug("%s: SCIOSTAT: ioctl received\n",
                                sc_adapter[card]->devicename);
 
-               bi = kzalloc(sizeof(boardInfo), GFP_KERNEL);
+               bi = kmalloc (sizeof(boardInfo), GFP_KERNEL);
                if (!bi) {
                        kfree(rcvmsg);
                        return -ENOMEM;
index 0823e2622e8c569013d2da5111d20a3cfb435be4..e5225d28f39245cbff44373f38947e3839847a6f 100644 (file)
@@ -211,6 +211,7 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
                                        const struct of_device_id *match)
 {
        struct device_node *np = ofdev->node, *child;
+       struct gpio_led led;
        struct gpio_led_of_platform_data *pdata;
        int count = 0, ret;
 
@@ -225,8 +226,8 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
        if (!pdata)
                return -ENOMEM;
 
+       memset(&led, 0, sizeof(led));
        for_each_child_of_node(np, child) {
-               struct gpio_led led = {};
                enum of_gpio_flags flags;
                const char *state;
 
index a564fe2eff14146c66aed69761df9ce75a714b7b..386a7972111d9f88b4e13285d1c810ef29647114 100644 (file)
@@ -90,8 +90,6 @@ static struct task_struct *thread_therm = NULL;
 
 static void write_both_fan_speed(struct thermostat *th, int speed);
 static void write_fan_speed(struct thermostat *th, int speed, int fan);
-static void thermostat_create_files(void);
-static void thermostat_remove_files(void);
 
 static int
 write_reg(struct thermostat* th, int reg, u8 data)
@@ -163,8 +161,6 @@ remove_thermostat(struct i2c_client *client)
        struct thermostat *th = i2c_get_clientdata(client);
        int i;
        
-       thermostat_remove_files();
-
        if (thread_therm != NULL) {
                kthread_stop(thread_therm);
        }
@@ -453,8 +449,6 @@ static int probe_thermostat(struct i2c_client *client,
                return -ENOMEM;
        }
 
-       thermostat_create_files();
-
        return 0;
 }
 
@@ -572,6 +566,7 @@ thermostat_init(void)
        struct device_node* np;
        const u32 *prop;
        int i = 0, offset = 0;
+       int err;
 
        np = of_find_node_by_name(NULL, "fan");
        if (!np)
@@ -638,17 +633,6 @@ thermostat_init(void)
                return -ENODEV;
        }
 
-#ifndef CONFIG_I2C_POWERMAC
-       request_module("i2c-powermac");
-#endif
-
-       return i2c_add_driver(&thermostat_driver);
-}
-
-static void thermostat_create_files(void)
-{
-       int err;
-
        err = device_create_file(&of_dev->dev, &dev_attr_sensor1_temperature);
        err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_temperature);
        err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_limit);
@@ -663,9 +647,16 @@ static void thermostat_create_files(void)
        if (err)
                printk(KERN_WARNING
                        "Failed to create tempertaure attribute file(s).\n");
+
+#ifndef CONFIG_I2C_POWERMAC
+       request_module("i2c-powermac");
+#endif
+
+       return i2c_add_driver(&thermostat_driver);
 }
 
-static void thermostat_remove_files(void)
+static void __exit
+thermostat_exit(void)
 {
        if (of_dev) {
                device_remove_file(&of_dev->dev, &dev_attr_sensor1_temperature);
@@ -682,14 +673,9 @@ static void thermostat_remove_files(void)
                        device_remove_file(&of_dev->dev,
                                           &dev_attr_sensor2_fan_speed);
 
+               of_device_unregister(of_dev);
        }
-}
-
-static void __exit
-thermostat_exit(void)
-{
        i2c_del_driver(&thermostat_driver);
-       of_device_unregister(of_dev);
 }
 
 module_init(thermostat_init);
index 47831536deb13a8a687d631f405b734eb639a88d..a5e5f2fbf96351baca4fb443a472f1f58692d379 100644 (file)
@@ -1317,8 +1317,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 {
        if (!bitmap) return;
        if (behind) {
-               if (atomic_dec_and_test(&bitmap->behind_writes))
-                       wake_up(&bitmap->behind_wait);
+               atomic_dec(&bitmap->behind_writes);
                PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
                  atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
        }
@@ -1630,7 +1629,6 @@ int bitmap_create(mddev_t *mddev)
        atomic_set(&bitmap->pending_writes, 0);
        init_waitqueue_head(&bitmap->write_wait);
        init_waitqueue_head(&bitmap->overflow_wait);
-       init_waitqueue_head(&bitmap->behind_wait);
 
        bitmap->mddev = mddev;
 
index 86950bc527afd7c696f5fc2b89543b46cca33193..7e38d13ddcacce54b47206f191af739ce84d436c 100644 (file)
@@ -254,9 +254,6 @@ struct bitmap {
        wait_queue_head_t write_wait;
        wait_queue_head_t overflow_wait;
 
-#ifndef __GENKSYMS__
-       wait_queue_head_t behind_wait;
-#endif
 };
 
 /* the bitmap API */
index 5f9315b32a42aed67da57a7de2caf97bfdcb8fd1..8a223a48802c0c867f79818045399937e650bf5e 100644 (file)
@@ -162,7 +162,7 @@ static inline sector_t get_dev_size(struct block_device *bdev)
 static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
                                      sector_t sector)
 {
-       return sector >> store->chunk_shift;
+       return (sector & ~store->chunk_mask) >> store->chunk_shift;
 }
 
 int dm_exception_store_type_register(struct dm_exception_store_type *type);
index 818b617ab3b28b1fcb687266f23a886ff4651e74..d19854c98184de70a6ba7bb5432543eecd212a04 100644 (file)
@@ -249,46 +249,40 @@ static void __hash_remove(struct hash_cell *hc)
 
 static void dm_hash_remove_all(int keep_open_devices)
 {
-       int i, dev_skipped;
+       int i, dev_skipped, dev_removed;
        struct hash_cell *hc;
-       struct mapped_device *md;
-
-retry:
-       dev_skipped = 0;
+       struct list_head *tmp, *n;
 
        down_write(&_hash_lock);
 
+retry:
+       dev_skipped = dev_removed = 0;
        for (i = 0; i < NUM_BUCKETS; i++) {
-               list_for_each_entry(hc, _name_buckets + i, name_list) {
-                       md = hc->md;
-                       dm_get(md);
+               list_for_each_safe (tmp, n, _name_buckets + i) {
+                       hc = list_entry(tmp, struct hash_cell, name_list);
 
-                       if (keep_open_devices && dm_lock_for_deletion(md)) {
-                               dm_put(md);
+                       if (keep_open_devices &&
+                           dm_lock_for_deletion(hc->md)) {
                                dev_skipped++;
                                continue;
                        }
-
                        __hash_remove(hc);
-
-                       up_write(&_hash_lock);
-
-                       dm_put(md);
-
-                       /*
-                        * Some mapped devices may be using other mapped
-                        * devices, so repeat until we make no further
-                        * progress.  If a new mapped device is created
-                        * here it will also get removed.
-                        */
-                       goto retry;
+                       dev_removed = 1;
                }
        }
 
-       up_write(&_hash_lock);
+       /*
+        * Some mapped devices may be using other mapped devices, so if any
+        * still exist, repeat until we make no further progress.
+        */
+       if (dev_skipped) {
+               if (dev_removed)
+                       goto retry;
 
-       if (dev_skipped)
                DMWARN("remove_all left %d open device(s)", dev_skipped);
+       }
+
+       up_write(&_hash_lock);
 }
 
 static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
index f336c695908217610a0d4a66e0ae2e27a68e85a0..32d0b878ecccb5ac3b7878d68adc677ebc6aa880 100644 (file)
@@ -691,7 +691,6 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
                if (as->argc < nr_params) {
                        ti->error = "not enough path parameters";
-                       r = -EINVAL;
                        goto bad;
                }
 
index d7786e3514cda7dbd68856ce9e8cea1b14a6e6c1..724efc63904dd5459b5e675a5ead3f5aba0394f6 100644 (file)
@@ -614,10 +614,8 @@ static void dec_pending(struct dm_io *io, int error)
                        if (!md->barrier_error && io_error != -EOPNOTSUPP)
                                md->barrier_error = io_error;
                        end_io_acct(io);
-                       free_io(md, io);
                } else {
                        end_io_acct(io);
-                       free_io(md, io);
 
                        if (io_error != DM_ENDIO_REQUEUE) {
                                trace_block_bio_complete(md->queue, bio);
@@ -625,6 +623,8 @@ static void dec_pending(struct dm_io *io, int error)
                                bio_endio(bio, io_error);
                        }
                }
+
+               free_io(md, io);
        }
 }
 
@@ -1487,15 +1487,10 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
        return BLKPREP_OK;
 }
 
-/*
- * Returns:
- * 0  : the request has been processed (not requeued)
- * !0 : the request has been requeued
- */
-static int map_request(struct dm_target *ti, struct request *rq,
-                      struct mapped_device *md)
+static void map_request(struct dm_target *ti, struct request *rq,
+                       struct mapped_device *md)
 {
-       int r, requeued = 0;
+       int r;
        struct request *clone = rq->special;
        struct dm_rq_target_io *tio = clone->end_io_data;
 
@@ -1521,7 +1516,6 @@ static int map_request(struct dm_target *ti, struct request *rq,
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_unmapped_request(clone);
-               requeued = 1;
                break;
        default:
                if (r > 0) {
@@ -1533,8 +1527,6 @@ static int map_request(struct dm_target *ti, struct request *rq,
                dm_kill_unmapped_request(clone, r);
                break;
        }
-
-       return requeued;
 }
 
 /*
@@ -1576,17 +1568,12 @@ static void dm_request_fn(struct request_queue *q)
 
                blk_start_request(rq);
                spin_unlock(q->queue_lock);
-               if (map_request(ti, rq, md))
-                       goto requeued;
-
+               map_request(ti, rq, md);
                spin_lock_irq(q->queue_lock);
        }
 
        goto out;
 
-requeued:
-       spin_lock_irq(q->queue_lock);
-
 plug_and_out:
        if (!elv_queue_empty(q))
                /* Some requests still remain, retry later */
index dff9d2f449c380363287b567c9b5e1ad79ee7ee1..1ceceb334d5ebe8f5ce637d29898fc984bfcc797 100644 (file)
@@ -172,14 +172,12 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit max_phys_segments to 1 lying within
-                * a single page.
+                * violating it, so limit ->max_sector to one PAGE, as
+                * a one page request is never in violation.
                 */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                       blk_queue_max_phys_segments(mddev->queue, 1);
-                       blk_queue_segment_boundary(mddev->queue,
-                                                  PAGE_CACHE_SIZE - 1);
-               }
+               if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                conf->array_sectors += rdev->sectors;
                cnt++;
index 0352746824596940713760f57bff737e10aae0b8..08f7471d015039dc125fac9c7f0bc79a0594eb02 100644 (file)
@@ -1122,7 +1122,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
        md_super_wait(rdev->mddev);
-       return num_sectors;
+       return num_sectors / 2; /* kB for sysfs */
 }
 
 
@@ -1485,7 +1485,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
        md_super_wait(rdev->mddev);
-       return num_sectors;
+       return num_sectors / 2; /* kB for sysfs */
 }
 
 static struct super_type super_types[] = {
@@ -2011,18 +2011,12 @@ repeat:
                if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
                        /* .. if the array isn't clean, an 'even' event must also go
                         * to spares. */
-                       if ((mddev->events&1)==0) {
+                       if ((mddev->events&1)==0)
                                nospares = 0;
-                               sync_req = 2; /* force a second update to get the
-                                              * even/odd in sync */
-                       }
                } else {
                        /* otherwise an 'odd' event must go to spares */
-                       if ((mddev->events&1)) {
+                       if ((mddev->events&1))
                                nospares = 0;
-                               sync_req = 2; /* force a second update to get the
-                                              * even/odd in sync */
-                       }
                }
        }
 
@@ -5334,7 +5328,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
        int err = 0;
        void __user *argp = (void __user *)arg;
        mddev_t *mddev = NULL;
-       int ro;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
@@ -5470,34 +5463,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        err = do_md_stop(mddev, 1, 1);
                        goto done_unlock;
 
-               case BLKROSET:
-                       if (get_user(ro, (int __user *)(arg))) {
-                               err = -EFAULT;
-                               goto done_unlock;
-                       }
-                       err = -EINVAL;
-
-                       /* if the bdev is going readonly the value of mddev->ro
-                        * does not matter, no writes are coming
-                        */
-                       if (ro)
-                               goto done_unlock;
-
-                       /* are we are already prepared for writes? */
-                       if (mddev->ro != 1)
-                               goto done_unlock;
-
-                       /* transitioning to readauto need only happen for
-                        * arrays that call md_write_start
-                        */
-                       if (mddev->pers) {
-                               err = restart_array(mddev);
-                               if (err == 0) {
-                                       mddev->ro = 2;
-                                       set_disk_ro(mddev->gendisk, 0);
-                               }
-                       }
-                       goto done_unlock;
        }
 
        /*
index e4b11f18adc7d978947d4da9bb992b8cec334559..ee7646f974a07165bd368deb3a18f6390e5553dc 100644 (file)
@@ -301,16 +301,14 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                                          rdev->data_offset << 9);
 
                /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit ->max_phys_segments to one, lying
-                * within a single page.
+                * violating it, so limit ->max_sector to one PAGE, as
+                * a one page request is never in violation.
                 * (Note: it is very unlikely that a device with
                 * merge_bvec_fn will be involved in multipath.)
                 */
-                       if (q->merge_bvec_fn) {
-                               blk_queue_max_phys_segments(mddev->queue, 1);
-                               blk_queue_segment_boundary(mddev->queue,
-                                                          PAGE_CACHE_SIZE - 1);
-                       }
+                       if (q->merge_bvec_fn &&
+                           queue_max_sectors(q) > (PAGE_SIZE>>9))
+                               blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                        conf->working_disks++;
                        mddev->degraded--;
@@ -478,11 +476,9 @@ static int multipath_run (mddev_t *mddev)
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, not that we ever expect a device with
                 * a merge_bvec_fn to be involved in multipath */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                       blk_queue_max_phys_segments(mddev->queue, 1);
-                       blk_queue_segment_boundary(mddev->queue,
-                                                  PAGE_CACHE_SIZE - 1);
-               }
+               if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                if (!test_bit(Faulty, &rdev->flags))
                        conf->working_disks++;
index 3db857cdd33e14bc0b6430df75b1da8783bad435..d3a4ce06015a300e9d4df5e95ee63669613db17e 100644 (file)
@@ -176,15 +176,14 @@ static int create_strip_zones(mddev_t *mddev)
                disk_stack_limits(mddev->gendisk, rdev1->bdev,
                                  rdev1->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit ->max_phys_segments to 1, lying within
-                * a single page.
+                * violating it, so limit ->max_sector to one PAGE, as
+                * a one page request is never in violation.
                 */
 
-               if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
-                       blk_queue_max_phys_segments(mddev->queue, 1);
-                       blk_queue_segment_boundary(mddev->queue,
-                                                  PAGE_CACHE_SIZE - 1);
-               }
+               if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
+                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+
                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
index 968cb14b63c0dc82cc5a40dd9b6a451581c14241..e07ce2e033a95c48ba2c9a78c519640743220e2d 100644 (file)
@@ -417,7 +417,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
  */
 static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 {
-       const sector_t this_sector = r1_bio->sector;
+       const unsigned long this_sector = r1_bio->sector;
        int new_disk = conf->last_used, disk = new_disk;
        int wonly_disk = -1;
        const int sectors = r1_bio->sectors;
@@ -433,7 +433,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
  retry:
        if (conf->mddev->recovery_cp < MaxSector &&
            (this_sector + sectors >= conf->next_resync)) {
-               /* Choose the first operational device, for consistancy */
+               /* Choose the first operation device, for consistancy */
                new_disk = 0;
 
                for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
@@ -845,15 +845,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
                }
                mirror = conf->mirrors + rdisk;
 
-               if (test_bit(WriteMostly, &mirror->rdev->flags) &&
-                   bitmap) {
-                       /* Reading from a write-mostly device must
-                        * take care not to over-take any writes
-                        * that are 'behind'
-                        */
-                       wait_event(bitmap->behind_wait,
-                                  atomic_read(&bitmap->behind_writes) == 0);
-               }
                r1_bio->read_disk = rdisk;
 
                read_bio = bio_clone(bio, GFP_NOIO);
@@ -900,10 +891,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
                        if (test_bit(Faulty, &rdev->flags)) {
                                rdev_dec_pending(rdev, mddev);
                                r1_bio->bios[i] = NULL;
-                       } else {
+                       } else
                                r1_bio->bios[i] = bio;
-                               targets++;
-                       }
+                       targets++;
                } else
                        r1_bio->bios[i] = NULL;
        }
@@ -931,13 +921,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
                set_bit(R1BIO_Degraded, &r1_bio->state);
        }
 
-       /* do behind I/O ?
-        * Not if there are too many, or cannot allocate memory,
-        * or a reader on WriteMostly is waiting for behind writes
-        * to flush */
+       /* do behind I/O ? */
        if (bitmap &&
            atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
-           !waitqueue_active(&bitmap->behind_wait) &&
            (behind_pages = alloc_behind_pages(bio)) != NULL)
                set_bit(R1BIO_BehindIO, &r1_bio->state);
 
@@ -1188,7 +1174,6 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
                 * is not possible.
                 */
                if (!test_bit(Faulty, &rdev->flags) &&
-                   !mddev->recovery_disabled &&
                    mddev->degraded < conf->raid_disks) {
                        err = -EBUSY;
                        goto abort;
@@ -2119,13 +2104,15 @@ static int stop(mddev_t *mddev)
 {
        conf_t *conf = mddev->private;
        struct bitmap *bitmap = mddev->bitmap;
+       int behind_wait = 0;
 
        /* wait for behind writes to complete */
-       if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
-               printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop.\n", mdname(mddev));
+       while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
+               behind_wait++;
+               printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(HZ); /* wait a second */
                /* need to kick something here to make sure I/O goes? */
-               wait_event(bitmap->behind_wait,
-                          atomic_read(&bitmap->behind_writes) == 0);
        }
 
        raise_barrier(conf);
index 1b4e232bce3c77d75461592a7228a2f31aff8721..c2cb7b87b440dce777d92bb6c215d1c82efddbe1 100644 (file)
@@ -493,7 +493,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
  */
 static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 {
-       const sector_t this_sector = r10_bio->sector;
+       const unsigned long this_sector = r10_bio->sector;
        int disk, slot, nslot;
        const int sectors = r10_bio->sectors;
        sector_t new_distance, current_distance;
@@ -824,29 +824,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
                 */
                bp = bio_split(bio,
                               chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
-
-               /* Each of these 'make_request' calls will call 'wait_barrier'.
-                * If the first succeeds but the second blocks due to the resync
-                * thread raising the barrier, we will deadlock because the
-                * IO to the underlying device will be queued in generic_make_request
-                * and will never complete, so will never reduce nr_pending.
-                * So increment nr_waiting here so no new raise_barriers will
-                * succeed, and so the second wait_barrier cannot block.
-                */
-               spin_lock_irq(&conf->resync_lock);
-               conf->nr_waiting++;
-               spin_unlock_irq(&conf->resync_lock);
-
                if (make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (make_request(q, &bp->bio2))
                        generic_make_request(&bp->bio2);
 
-               spin_lock_irq(&conf->resync_lock);
-               conf->nr_waiting--;
-               wake_up(&conf->wait_barrier);
-               spin_unlock_irq(&conf->resync_lock);
-
                bio_pair_release(bp);
                return 0;
        bad_map:
@@ -1173,17 +1155,13 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
-                       /* as we don't honour merge_bvec_fn, we must
-                        * never risk violating it, so limit
-                        * ->max_phys_segments to one lying with a single
-                        * page, as a one page request is never in
-                        * violation.
+                       /* as we don't honour merge_bvec_fn, we must never risk
+                        * violating it, so limit ->max_sector to one PAGE, as
+                        * a one page request is never in violation.
                         */
-                       if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                               blk_queue_max_phys_segments(mddev->queue, 1);
-                               blk_queue_segment_boundary(mddev->queue,
-                                                          PAGE_CACHE_SIZE - 1);
-                       }
+                       if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+                           queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+                               blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                        p->head_position = 0;
                        rdev->raid_disk = mirror;
@@ -2177,14 +2155,12 @@ static int run(mddev_t *mddev)
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit max_phys_segments to 1 lying
-                * within a single page.
+                * violating it, so limit ->max_sector to one PAGE, as
+                * a one page request is never in violation.
                 */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                       blk_queue_max_phys_segments(mddev->queue, 1);
-                       blk_queue_segment_boundary(mddev->queue,
-                                                  PAGE_CACHE_SIZE - 1);
-               }
+               if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                disk->head_position = 0;
        }
index 23949739fc257d7b270fc8688158dd746f9de68f..431b9b26ca5deda17ed117f634e1975556e009e0 100644 (file)
@@ -1526,7 +1526,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
 
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
                atomic_inc(&rdev->read_errors);
-               if (conf->mddev->degraded >= conf->max_degraded)
+               if (conf->mddev->degraded)
                        printk_rl(KERN_WARNING
                                  "raid5:%s: read error not correctable "
                                  "(sector %llu on %s).\n",
@@ -1649,8 +1649,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                     int previous, int *dd_idx,
                                     struct stripe_head *sh)
 {
-       sector_t stripe, stripe2;
-       sector_t chunk_number;
+       long stripe;
+       unsigned long chunk_number;
        unsigned int chunk_offset;
        int pd_idx, qd_idx;
        int ddf_layout = 0;
@@ -1670,13 +1670,18 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
         */
        chunk_offset = sector_div(r_sector, sectors_per_chunk);
        chunk_number = r_sector;
+       BUG_ON(r_sector != chunk_number);
 
        /*
         * Compute the stripe number
         */
-       stripe = chunk_number;
-       *dd_idx = sector_div(stripe, data_disks);
-       stripe2 = stripe;
+       stripe = chunk_number / data_disks;
+
+       /*
+        * Compute the data disk and parity disk indexes inside the stripe
+        */
+       *dd_idx = chunk_number % data_disks;
+
        /*
         * Select the parity disk based on the user selected algorithm.
         */
@@ -1688,21 +1693,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
        case 5:
                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
-                       pd_idx = data_disks - sector_div(stripe2, raid_disks);
+                       pd_idx = data_disks - stripe % raid_disks;
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        break;
                case ALGORITHM_RIGHT_ASYMMETRIC:
-                       pd_idx = sector_div(stripe2, raid_disks);
+                       pd_idx = stripe % raid_disks;
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
-                       pd_idx = data_disks - sector_div(stripe2, raid_disks);
+                       pd_idx = data_disks - stripe % raid_disks;
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_RIGHT_SYMMETRIC:
-                       pd_idx = sector_div(stripe2, raid_disks);
+                       pd_idx = stripe % raid_disks;
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_PARITY_0:
@@ -1722,7 +1727,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
-                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
+                       pd_idx = raid_disks - 1 - (stripe % raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1731,7 +1736,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                (*dd_idx) += 2; /* D D P Q D */
                        break;
                case ALGORITHM_RIGHT_ASYMMETRIC:
-                       pd_idx = sector_div(stripe2, raid_disks);
+                       pd_idx = stripe % raid_disks;
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1740,12 +1745,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                (*dd_idx) += 2; /* D D P Q D */
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
-                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
+                       pd_idx = raid_disks - 1 - (stripe % raid_disks);
                        qd_idx = (pd_idx + 1) % raid_disks;
                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_RIGHT_SYMMETRIC:
-                       pd_idx = sector_div(stripe2, raid_disks);
+                       pd_idx = stripe % raid_disks;
                        qd_idx = (pd_idx + 1) % raid_disks;
                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                        break;
@@ -1764,7 +1769,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                        /* Exactly the same as RIGHT_ASYMMETRIC, but or
                         * of blocks for computing Q is different.
                         */
-                       pd_idx = sector_div(stripe2, raid_disks);
+                       pd_idx = stripe % raid_disks;
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1779,8 +1784,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                         * D D D P Q  rather than
                         * Q D D D P
                         */
-                       stripe2 += 1;
-                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
+                       pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1792,7 +1796,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
                case ALGORITHM_ROTATING_N_CONTINUE:
                        /* Same as left_symmetric but Q is before P */
-                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
+                       pd_idx = raid_disks - 1 - (stripe % raid_disks);
                        qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        ddf_layout = 1;
@@ -1800,27 +1804,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
                case ALGORITHM_LEFT_ASYMMETRIC_6:
                        /* RAID5 left_asymmetric, with Q on last device */
-                       pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
+                       pd_idx = data_disks - stripe % (raid_disks-1);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        qd_idx = raid_disks - 1;
                        break;
 
                case ALGORITHM_RIGHT_ASYMMETRIC_6:
-                       pd_idx = sector_div(stripe2, raid_disks-1);
+                       pd_idx = stripe % (raid_disks-1);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        qd_idx = raid_disks - 1;
                        break;
 
                case ALGORITHM_LEFT_SYMMETRIC_6:
-                       pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
+                       pd_idx = data_disks - stripe % (raid_disks-1);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
                        qd_idx = raid_disks - 1;
                        break;
 
                case ALGORITHM_RIGHT_SYMMETRIC_6:
-                       pd_idx = sector_div(stripe2, raid_disks-1);
+                       pd_idx = stripe % (raid_disks-1);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
                        qd_idx = raid_disks - 1;
                        break;
@@ -1865,14 +1869,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
                                 : conf->algorithm;
        sector_t stripe;
        int chunk_offset;
-       sector_t chunk_number;
-       int dummy1, dd_idx = i;
+       int chunk_number, dummy1, dd_idx = i;
        sector_t r_sector;
        struct stripe_head sh2;
 
 
        chunk_offset = sector_div(new_sector, sectors_per_chunk);
        stripe = new_sector;
+       BUG_ON(new_sector != stripe);
 
        if (i == sh->pd_idx)
                return 0;
@@ -1965,7 +1969,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
        }
 
        chunk_number = stripe * data_disks + i;
-       r_sector = chunk_number * sectors_per_chunk + chunk_offset;
+       r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
 
        check = raid5_compute_sector(conf, r_sector,
                                     previous, &dummy1, &sh2);
index 55e591daf717f4fc26efecf98923aca309cad449..0241a7c5c34a65ae53f28698936d06ae41df8c43 100644 (file)
@@ -350,7 +350,6 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
        const u8 *ts, *ts_end, *from_where = NULL;
        u8 ts_remain = 0, how_much = 0, new_ts = 1;
        struct ethhdr *ethh = NULL;
-       bool error = false;
 
 #ifdef ULE_DEBUG
        /* The code inside ULE_DEBUG keeps a history of the last 100 TS cells processed. */
@@ -460,16 +459,10 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
 
                                                /* Drop partly decoded SNDU, reset state, resync on PUSI. */
                                                if (priv->ule_skb) {
-                                                       error = true;
-                                                       dev_kfree_skb(priv->ule_skb);
-                                               }
-
-                                               if (error || priv->ule_sndu_remain) {
+                                                       dev_kfree_skb( priv->ule_skb );
                                                        dev->stats.rx_errors++;
                                                        dev->stats.rx_frame_errors++;
-                                                       error = false;
                                                }
-
                                                reset_ule(priv);
                                                priv->need_pusi = 1;
                                                continue;
@@ -511,7 +504,6 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
                                       "bytes left in TS.  Resyncing.\n", ts_remain);
                                priv->ule_sndu_len = 0;
                                priv->need_pusi = 1;
-                               ts += TS_SZ;
                                continue;
                        }
 
@@ -541,7 +533,6 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
                                from_where += 2;
                        }
 
-                       priv->ule_sndu_remain = priv->ule_sndu_len + 2;
                        /*
                         * State of current TS:
                         *   ts_remain (remaining bytes in the current TS cell)
@@ -551,7 +542,6 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
                         */
                        switch (ts_remain) {
                                case 1:
-                                       priv->ule_sndu_remain--;
                                        priv->ule_sndu_type = from_where[0] << 8;
                                        priv->ule_sndu_type_1 = 1; /* first byte of ule_type is set. */
                                        ts_remain -= 1; from_where += 1;
@@ -565,7 +555,6 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
                                default: /* complete ULE header is present in current TS. */
                                        /* Extract ULE type field. */
                                        if (priv->ule_sndu_type_1) {
-                                               priv->ule_sndu_type_1 = 0;
                                                priv->ule_sndu_type |= from_where[0];
                                                from_where += 1; /* points to payload start. */
                                                ts_remain -= 1;
index 690823fc1cae802f486581ce213702a9c28060e1..0e4b97fba384bb837bd1946d1fc502ce03f46dc2 100644 (file)
@@ -112,8 +112,8 @@ config DVB_USB_CXUSB
        select DVB_MT352 if !DVB_FE_CUSTOMISE
        select DVB_ZL10353 if !DVB_FE_CUSTOMISE
        select DVB_DIB7000P if !DVB_FE_CUSTOMISE
+       select DVB_LGS8GL5 if !DVB_FE_CUSTOMISE
        select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE
-       select DVB_LGS8GXX if !DVB_FE_CUSTOMISE
        select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE
        select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
        select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
index 445fa10680644c9d546d5aa8644947e6b5587573..3051b64aa17c6bd3c8d7b3fbaad1727908d92b26 100644 (file)
@@ -192,8 +192,8 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
        spi_bias *= qam_tab[p->constellation];
        spi_bias /= p->code_rate_HP + 1;
        spi_bias /= (guard_tab[p->guard_interval] + 32);
-       spi_bias *= 1000;
-       spi_bias /= 1000 + ppm/1000;
+       spi_bias *= 1000ULL;
+       spi_bias /= 1000ULL + ppm/1000;
        spi_bias *= p->code_rate_HP;
 
        val0x04 = (p->transmission_mode << 2) | p->guard_interval;
index 32a7ec65ec42f1c40fbb4e4576b56cded5045902..d8d4214fd65f2c7af9d3af1a263a30f9dc24e3a9 100644 (file)
@@ -68,14 +68,13 @@ config DVB_BUDGET
        select DVB_VES1820 if !DVB_FE_CUSTOMISE
        select DVB_L64781 if !DVB_FE_CUSTOMISE
        select DVB_TDA8083 if !DVB_FE_CUSTOMISE
+       select DVB_TDA10021 if !DVB_FE_CUSTOMISE
+       select DVB_TDA10023 if !DVB_FE_CUSTOMISE
        select DVB_S5H1420 if !DVB_FE_CUSTOMISE
        select DVB_TDA10086 if !DVB_FE_CUSTOMISE
        select DVB_TDA826X if !DVB_FE_CUSTOMISE
        select DVB_LNBP21 if !DVB_FE_CUSTOMISE
        select DVB_TDA1004X if !DVB_FE_CUSTOMISE
-       select DVB_ISL6423 if !DVB_FE_CUSTOMISE
-       select DVB_STV090x if !DVB_FE_CUSTOMISE
-       select DVB_STV6110x if !DVB_FE_CUSTOMISE
        help
          Support for simple SAA7146 based DVB cards (so called Budget-
          or Nova-PCI cards) without onboard MPEG2 decoder, and without
index 95a463c1ef85f20b2efce59f1ff4cb8558406c75..e48380c48990165955a9dadeb80d90a758aa6e46 100644 (file)
@@ -643,6 +643,9 @@ static void frontend_init(struct budget *budget)
                                        &budget->i2c_adap,
                                        &tt1600_isl6423_config);
 
+                       } else {
+                               dvb_frontend_detach(budget->dvb_frontend);
+                               budget->dvb_frontend = NULL;
                        }
                }
                break;
index d258ed719b7d75e63895eb475d99082a9b9eebf6..a6724019c66f5d9d121edd285a89e73772e7e350 100644 (file)
@@ -4468,7 +4468,6 @@ static int __devinit bttv_probe(struct pci_dev *dev,
                request_modules(btv);
        }
 
-       init_bttv_i2c_ir(btv);
        bttv_input_init(btv);
 
        /* everything is fine */
index 3eb7c2952c8bd993349ab1cc4bd944ea8791b49a..beda363418b0e6f28c5bc1ed1342ef16a9676817 100644 (file)
@@ -388,12 +388,7 @@ int __devinit init_bttv_i2c(struct bttv *btv)
        if (0 == btv->i2c_rc && i2c_scan)
                do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client);
 
-       return btv->i2c_rc;
-}
-
-/* Instantiate the I2C IR receiver device, if present */
-void __devinit init_bttv_i2c_ir(struct bttv *btv)
-{
+       /* Instantiate the IR receiver device, if present */
        if (0 == btv->i2c_rc) {
                struct i2c_board_info info;
                /* The external IR receiver is at i2c address 0x34 (0x35 for
@@ -413,6 +408,7 @@ void __devinit init_bttv_i2c_ir(struct bttv *btv)
                strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
                i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
        }
+       return btv->i2c_rc;
 }
 
 int __devexit fini_bttv_i2c(struct bttv *btv)
index 6cccc2a17eee074f121fee7aabbb581223df1905..a1d0e9c9f2866ea3a11b24f4357ac31547899b4b 100644 (file)
@@ -279,7 +279,6 @@ extern unsigned int bttv_debug;
 extern unsigned int bttv_gpio;
 extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
 extern int init_bttv_i2c(struct bttv *btv);
-extern void init_bttv_i2c_ir(struct bttv *btv);
 extern int fini_bttv_i2c(struct bttv *btv);
 
 #define bttv_printk if (bttv_verbose) printk
index dd30b9dad4a6ece711f09ff5cb8cd6464f681852..319c459459e0c5a0fb0f3323d0780b59a8ec65c6 100644 (file)
@@ -225,16 +225,14 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
                     dev->board.name, dev->model);
 
        /* set the direction for GPIO pins */
-       if (dev->board.tuner_gpio) {
-               cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
-               cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
-               cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
+       cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
+       cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
+       cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
 
-               /* request some modules if any required */
+       /* request some modules if any required */
 
-               /* reset the Tuner */
-               cx231xx_gpio_set(dev, dev->board.tuner_gpio);
-       }
+       /* reset the Tuner */
+       cx231xx_gpio_set(dev, dev->board.tuner_gpio);
 
        /* set the mode to Analog mode initially */
        cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
index d4746e06451692f8c853bf8c73924c87f6ad70b3..4172cb3874207428556d5d75e99d46e1d5f6a38f 100644 (file)
@@ -365,17 +365,7 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
 
                memset(&info, 0, sizeof(struct i2c_board_info));
                strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
-               /*
-                * We can't call i2c_new_probed_device() because it uses
-                * quick writes for probing and the IR receiver device only
-                * replies to reads.
-                */
-               if (i2c_smbus_xfer(&bus->i2c_adap, addr_list[0], 0,
-                                  I2C_SMBUS_READ, 0, I2C_SMBUS_QUICK,
-                                  NULL) >= 0) {
-                       info.addr = addr_list[0];
-                       i2c_new_device(&bus->i2c_adap, &info);
-               }
+               i2c_new_probed_device(&bus->i2c_adap, &info, addr_list);
        }
 
        return bus->i2c_rc;
index fb39f11845583a86dcb463745d66d498bc873efe..ee1ca39db06ad684758dc4da0bfa8b35aa8e1f56 100644 (file)
@@ -188,24 +188,10 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
                        0x18, 0x6b, 0x71,
                        I2C_CLIENT_END
                };
-               const unsigned short *addrp;
 
                memset(&info, 0, sizeof(struct i2c_board_info));
                strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
-               /*
-                * We can't call i2c_new_probed_device() because it uses
-                * quick writes for probing and at least some R receiver
-                * devices only reply to reads.
-                */
-               for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) {
-                       if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
-                                          I2C_SMBUS_READ, 0,
-                                          I2C_SMBUS_QUICK, NULL) >= 0) {
-                               info.addr = *addrp;
-                               i2c_new_device(&core->i2c_adap, &info);
-                               break;
-                       }
-               }
+               i2c_new_probed_device(&core->i2c_adap, &info, addr_list);
        }
        return core->i2c_rc;
 }
index efddf15d498cac67ecbfdd4c557dbd19e2fb1e68..db749461e5c61126a73bbabd7710d66b8fea5455 100644 (file)
@@ -610,7 +610,6 @@ static int dvb_fini(struct em28xx *dev)
 
        if (dev->dvb) {
                unregister_dvb(dev->dvb);
-               kfree(dev->dvb);
                dev->dvb = NULL;
        }
 
index d61767cd7a519b1698c0e9b541e4eaea44843040..f8328b9efae53506ed4ff7822c1e999e46058105 100644 (file)
@@ -530,12 +530,6 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
                        {0x13, 0x00, {0x01}, 1},
                        {0, 0, {0}, 0}
                };
-               /* Without this command the cam won't work with USB-UHCI */
-               gspca_dev->usb_buf[0] = 0x0a;
-               gspca_dev->usb_buf[1] = 0x00;
-               err_code = mr_write(gspca_dev, 2);
-               if (err_code < 0)
-                       return err_code;
                err_code = sensor_write_regs(gspca_dev, cif_sensor1_init_data,
                                         ARRAY_SIZE(cif_sensor1_init_data));
        }
index 7878182a67b2c7ee9d82cf6b7e15446c363b3a4e..bfae63f5584c408f555f63b0a5ef8124fa566f5f 100644 (file)
@@ -497,6 +497,8 @@ static const __devinitdata struct usb_device_id device_table[] = {
        {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
        /* QuickCam Messenger (new) */
        {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
+       /* QuickCam Messenger (new) */
+       {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 },
        {}
 };
 MODULE_DEVICE_TABLE(usb, device_table);
index 6b61bb67e9d1179c855b925099e9fc7d8720a604..fa6bb85cb4b06ebf7626f5dd6557b9d345107bee 100644 (file)
@@ -457,8 +457,6 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
                        struct fb_vblank vblank;
                        u32 trace;
 
-                       memset(&vblank, 0, sizeof(struct fb_vblank));
-
                        vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
                                        FB_VBLANK_HAVE_VSYNC;
                        trace = read_reg(0x028c0) >> 16;
index f7f7e04cf4853e02a5c0cd11d051433e6387f1a5..50b415e07edaba9280516850d8856a487cc29d44 100644 (file)
@@ -753,7 +753,7 @@ int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
                buf[0] = 0xff; /* fixed */
 
        ret = send_control_msg(pdev,
-               SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
+               SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf));
 
        if (!mode && ret >= 0) {
                if (value < 0)
index 09d42236f7486f89cba15d64067958fe9b975986..f87757fccc72114bdafe8ad4454f8e9a3b49b20e 100644 (file)
@@ -420,6 +420,19 @@ int saa7134_set_dmabits(struct saa7134_dev *dev)
                ctrl |= SAA7134_MAIN_CTRL_TE5;
                irq  |= SAA7134_IRQ1_INTE_RA2_1 |
                        SAA7134_IRQ1_INTE_RA2_0;
+
+               /* dma: setup channel 5 (= TS) */
+
+               saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
+               saa_writeb(SAA7134_TS_DMA1,
+                       ((dev->ts.nr_packets - 1) >> 8) & 0xff);
+               /* TSNOPIT=0, TSCOLAP=0 */
+               saa_writeb(SAA7134_TS_DMA2,
+                       (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
+               saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
+               saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
+                                                 SAA7134_RS_CONTROL_ME |
+                                                 (dev->ts.pt_ts.dma >> 12));
        }
 
        /* set task conditions + field handling */
index b9817d74943fa014b9f4861c58e002dec19dcd43..03488ba4c99c07f66a15e989414b700054ae1281 100644 (file)
@@ -250,19 +250,6 @@ int saa7134_ts_start(struct saa7134_dev *dev)
 
        BUG_ON(dev->ts_started);
 
-       /* dma: setup channel 5 (= TS) */
-       saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
-       saa_writeb(SAA7134_TS_DMA1,
-               ((dev->ts.nr_packets - 1) >> 8) & 0xff);
-       /* TSNOPIT=0, TSCOLAP=0 */
-       saa_writeb(SAA7134_TS_DMA2,
-               (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
-       saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
-       saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
-                                         SAA7134_RS_CONTROL_ME |
-                                         (dev->ts.pt_ts.dma >> 12));
-
-       /* reset hardware TS buffers */
        saa_writeb(SAA7134_TS_SERIAL1, 0x00);
        saa_writeb(SAA7134_TS_SERIAL1, 0x03);
        saa_writeb(SAA7134_TS_SERIAL1, 0x00);
index 0ca39ec4ba8e7a640c2a19aeedbd562119df9f99..4a293b4444593be0e5c54505e434e183adca0f20 100644 (file)
@@ -826,13 +826,6 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
                ret = 0;
                goto out;
 
-       case V4L2_CTRL_TYPE_BUTTON:
-               v4l2_ctrl->minimum = 0;
-               v4l2_ctrl->maximum = 0;
-               v4l2_ctrl->step = 0;
-               ret = 0;
-               goto out;
-
        default:
                break;
        }
index eb2ce2630a771bea0a9f11c0c9570f4c69f1a3c9..8756be5691544d6c496dcf2803bc24adeed8c3d5 100644 (file)
@@ -57,11 +57,6 @@ static struct uvc_format_desc uvc_fmts[] = {
                .guid           = UVC_GUID_FORMAT_YUY2,
                .fcc            = V4L2_PIX_FMT_YUYV,
        },
-       {
-               .name           = "YUV 4:2:2 (YUYV)",
-               .guid           = UVC_GUID_FORMAT_YUY2_ISIGHT,
-               .fcc            = V4L2_PIX_FMT_YUYV,
-       },
        {
                .name           = "YUV 4:2:0 (NV12)",
                .guid           = UVC_GUID_FORMAT_NV12,
@@ -88,15 +83,10 @@ static struct uvc_format_desc uvc_fmts[] = {
                .fcc            = V4L2_PIX_FMT_UYVY,
        },
        {
-               .name           = "Greyscale (8-bit)",
+               .name           = "Greyscale",
                .guid           = UVC_GUID_FORMAT_Y800,
                .fcc            = V4L2_PIX_FMT_GREY,
        },
-       {
-               .name           = "Greyscale (16-bit)",
-               .guid           = UVC_GUID_FORMAT_Y16,
-               .fcc            = V4L2_PIX_FMT_Y16,
-       },
        {
                .name           = "RGB Bayer",
                .guid           = UVC_GUID_FORMAT_BY8,
@@ -436,8 +426,7 @@ static int uvc_parse_format(struct uvc_device *dev,
        /* Parse the frame descriptors. Only uncompressed, MJPEG and frame
         * based formats have frame descriptors.
         */
-       while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
-              buffer[2] == ftype) {
+       while (buflen > 2 && buffer[2] == ftype) {
                frame = &format->frame[format->nframes];
                if (ftype != UVC_VS_FRAME_FRAME_BASED)
                        n = buflen > 25 ? buffer[25] : 0;
@@ -514,14 +503,12 @@ static int uvc_parse_format(struct uvc_device *dev,
                buffer += buffer[0];
        }
 
-       if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
-           buffer[2] == UVC_VS_STILL_IMAGE_FRAME) {
+       if (buflen > 2 && buffer[2] == UVC_VS_STILL_IMAGE_FRAME) {
                buflen -= buffer[0];
                buffer += buffer[0];
        }
 
-       if (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE &&
-           buffer[2] == UVC_VS_COLORFORMAT) {
+       if (buflen > 2 && buffer[2] == UVC_VS_COLORFORMAT) {
                if (buflen < 6) {
                        uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
                               "interface %d COLORFORMAT error\n",
@@ -762,11 +749,6 @@ static int uvc_parse_streaming(struct uvc_device *dev,
                buffer += buffer[0];
        }
 
-       if (buflen)
-               uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming interface "
-                       "%d has %u bytes of trailing descriptor garbage.\n",
-                       dev->udev->devnum, alts->desc.bInterfaceNumber, buflen);
-
        /* Parse the alternate settings to find the maximum bandwidth. */
        for (i = 0; i < intf->num_altsetting; ++i) {
                struct usb_host_endpoint *ep;
@@ -2066,15 +2048,6 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceSubClass   = 1,
          .bInterfaceProtocol   = 0,
          .driver_info          = UVC_QUIRK_STREAM_NO_FID },
-       /* Syntek (Packard Bell EasyNote MX52 */
-       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
-                               | USB_DEVICE_ID_MATCH_INT_INFO,
-         .idVendor             = 0x174f,
-         .idProduct            = 0x8a12,
-         .bInterfaceClass      = USB_CLASS_VIDEO,
-         .bInterfaceSubClass   = 1,
-         .bInterfaceProtocol   = 0,
-         .driver_info          = UVC_QUIRK_STREAM_NO_FID },
        /* Syntek (Asus F9SG) */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2139,15 +2112,6 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceSubClass   = 1,
          .bInterfaceProtocol   = 0,
          .driver_info          = UVC_QUIRK_PROBE_MINMAX },
-       /* Arkmicro unbranded */
-       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
-                               | USB_DEVICE_ID_MATCH_INT_INFO,
-         .idVendor             = 0x18ec,
-         .idProduct            = 0x3290,
-         .bInterfaceClass      = USB_CLASS_VIDEO,
-         .bInterfaceSubClass   = 1,
-         .bInterfaceProtocol   = 0,
-         .driver_info          = UVC_QUIRK_PROBE_DEF },
        /* Bodelin ProScopeHR */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_DEV_HI
index 030dfec314cabc2b92c4a12abbbadb450c154415..7ad0de87c0ed54251442455899a01d5f97c27ba0 100755 (executable)
@@ -112,9 +112,6 @@ struct uvc_xu_control {
 #define UVC_GUID_FORMAT_YUY2 \
        { 'Y',  'U',  'Y',  '2', 0x00, 0x00, 0x10, 0x00, \
         0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_YUY2_ISIGHT \
-       { 'Y',  'U',  'Y',  '2', 0x00, 0x00, 0x10, 0x00, \
-        0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71}
 #define UVC_GUID_FORMAT_NV12 \
        { 'N',  'V',  '1',  '2', 0x00, 0x00, 0x10, 0x00, \
         0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
@@ -130,13 +127,11 @@ struct uvc_xu_control {
 #define UVC_GUID_FORMAT_Y800 \
        { 'Y',  '8',  '0',  '0', 0x00, 0x00, 0x10, 0x00, \
         0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y16 \
-       { 'Y',  '1',  '6',  ' ', 0x00, 0x00, 0x10, 0x00, \
-        0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
 #define UVC_GUID_FORMAT_BY8 \
        { 'B',  'Y',  '8',  ' ', 0x00, 0x00, 0x10, 0x00, \
         0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
 
+
 /* ------------------------------------------------------------------------
  * Driver specific constants.
  */
index 64076ffec0e000bda9aa20027f3502305dd04d01..997975d5e024b06934e0a4d1d39bba4c091be85b 100644 (file)
@@ -193,24 +193,17 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u
 struct video_code32 {
        char            loadwhat[16];   /* name or tag of file being passed */
        compat_int_t    datasize;
-       compat_uptr_t   data;
+       unsigned char   *data;
 };
 
-static struct video_code __user *get_microcode32(struct video_code32 *kp)
+static int get_microcode32(struct video_code *kp, struct video_code32 __user *up)
 {
-       struct video_code __user *up;
-
-       up = compat_alloc_user_space(sizeof(*up));
-
-       /*
-        * NOTE! We don't actually care if these fail. If the
-        * user address is invalid, the native ioctl will do
-        * the error handling for us
-        */
-       (void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat));
-       (void) put_user(kp->datasize, &up->datasize);
-       (void) put_user(compat_ptr(kp->data), &up->data);
-       return up;
+       if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) ||
+               copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) ||
+               get_user(kp->datasize, &up->datasize) ||
+               copy_from_user(kp->data, up->data, up->datasize))
+                       return -EFAULT;
+       return 0;
 }
 
 #define VIDIOCGTUNER32         _IOWR('v', 4, struct video_tuner32)
@@ -748,7 +741,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
                struct video_tuner vt;
                struct video_buffer vb;
                struct video_window vw;
-               struct video_code32 vc;
+               struct video_code vc;
                struct video_audio va;
 #endif
                struct v4l2_format v2f;
@@ -827,11 +820,8 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
                break;
 
        case VIDIOCSMICROCODE:
-               /* Copy the 32-bit "video_code32" to kernel space */
-               if (copy_from_user(&karg.vc, up, sizeof(karg.vc)))
-                       return -EFAULT;
-               /* Convert the 32-bit version to a 64-bit version in user space */
-               up = get_microcode32(&karg.vc);
+               err = get_microcode32(&karg.vc, up);
+               compatible_arg = 0;
                break;
 
        case VIDIOCSFREQ:
index 46bd7e2a952ceec89217323ac8f03ad032aa9803..bd83fa0a4970fb13a5ddef788d1c68a5a7b21300 100644 (file)
@@ -1330,14 +1330,13 @@ static void mspro_block_remove(struct memstick_dev *card)
        struct mspro_block_data *msb = memstick_get_drvdata(card);
        unsigned long flags;
 
+       del_gendisk(msb->disk);
+       dev_dbg(&card->dev, "mspro block remove\n");
        spin_lock_irqsave(&msb->q_lock, flags);
        msb->eject = 1;
        blk_start_queue(msb->queue);
        spin_unlock_irqrestore(&msb->q_lock, flags);
 
-       del_gendisk(msb->disk);
-       dev_dbg(&card->dev, "mspro block remove\n");
-
        blk_cleanup_queue(msb->queue);
        msb->queue = NULL;
 
index 352acd05c46b93c417596025ebf12be3b423dc01..9b2e2198aee9dd07c12328c026fd7712bac95c03 100644 (file)
@@ -621,8 +621,11 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
         */
        iocnumX = khdr.iocnum & 0xFF;
        if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
-           (iocp == NULL))
+           (iocp == NULL)) {
+               printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - ioc%d not found!\n",
+                               __FILE__, __LINE__, iocnumX);
                return -ENODEV;
+       }
 
        if (!iocp->active) {
                printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n",
index 56d98eb20a255c673f4491d64bf89e65c27d382e..6cea7181ed73b28aad0e3b62f1f4916e832e6623 100644 (file)
@@ -792,36 +792,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
                         *  precedence!
                         */
                        sc->result = (DID_OK << 16) | scsi_status;
-                       if (!(scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
-
-                               /*
-                                * For an Errata on LSI53C1030
-                                * When the length of request data
-                                * and transfer data are different
-                                * with result of command (READ or VERIFY),
-                                * DID_SOFT_ERROR is set.
+                       if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
+                               /* Have already saved the status and sense data
                                 */
-                               if (ioc->bus_type == SPI) {
-                                       if (pScsiReq->CDB[0] == READ_6  ||
-                                           pScsiReq->CDB[0] == READ_10 ||
-                                           pScsiReq->CDB[0] == READ_12 ||
-                                           pScsiReq->CDB[0] == READ_16 ||
-                                           pScsiReq->CDB[0] == VERIFY  ||
-                                           pScsiReq->CDB[0] == VERIFY_16) {
-                                               if (scsi_bufflen(sc) !=
-                                                       xfer_cnt) {
-                                                       sc->result =
-                                                       DID_SOFT_ERROR << 16;
-                                                   printk(KERN_WARNING "Errata"
-                                                   "on LSI53C1030 occurred."
-                                                   "sc->req_bufflen=0x%02x,"
-                                                   "xfer_cnt=0x%02x\n",
-                                                   scsi_bufflen(sc),
-                                                   xfer_cnt);
-                                               }
-                                       }
-                               }
-
+                               ;
+                       } else {
                                if (xfer_cnt < sc->underflow) {
                                        if (scsi_status == SAM_STAT_BUSY)
                                                sc->result = SAM_STAT_BUSY;
@@ -860,58 +835,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
                        sc->result = (DID_OK << 16) | scsi_status;
                        if (scsi_state == 0) {
                                ;
-                       } else if (scsi_state &
-                           MPI_SCSI_STATE_AUTOSENSE_VALID) {
-
-                               /*
-                                * For potential trouble on LSI53C1030.
-                                * (date:2007.xx.)
-                                * It is checked whether the length of
-                                * request data is equal to
-                                * the length of transfer and residual.
-                                * MEDIUM_ERROR is set by incorrect data.
-                                */
-                               if ((ioc->bus_type == SPI) &&
-                                       (sc->sense_buffer[2] & 0x20)) {
-                                       u32      difftransfer;
-                                       difftransfer =
-                                       sc->sense_buffer[3] << 24 |
-                                       sc->sense_buffer[4] << 16 |
-                                       sc->sense_buffer[5] << 8 |
-                                       sc->sense_buffer[6];
-                                       if (((sc->sense_buffer[3] & 0x80) ==
-                                               0x80) && (scsi_bufflen(sc)
-                                               != xfer_cnt)) {
-                                               sc->sense_buffer[2] =
-                                                   MEDIUM_ERROR;
-                                               sc->sense_buffer[12] = 0xff;
-                                               sc->sense_buffer[13] = 0xff;
-                                               printk(KERN_WARNING"Errata"
-                                               "on LSI53C1030 occurred."
-                                               "sc->req_bufflen=0x%02x,"
-                                               "xfer_cnt=0x%02x\n" ,
-                                               scsi_bufflen(sc),
-                                               xfer_cnt);
-                                       }
-                                       if (((sc->sense_buffer[3] & 0x80)
-                                               != 0x80) &&
-                                               (scsi_bufflen(sc) !=
-                                               xfer_cnt + difftransfer)) {
-                                               sc->sense_buffer[2] =
-                                                       MEDIUM_ERROR;
-                                               sc->sense_buffer[12] = 0xff;
-                                               sc->sense_buffer[13] = 0xff;
-                                               printk(KERN_WARNING
-                                               "Errata on LSI53C1030 occurred"
-                                               "sc->req_bufflen=0x%02x,"
-                                               " xfer_cnt=0x%02x,"
-                                               "difftransfer=0x%02x\n",
-                                               scsi_bufflen(sc),
-                                               xfer_cnt,
-                                               difftransfer);
-                                       }
-                               }
-
+                       } else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
                                /*
                                 * If running against circa 200003dd 909 MPT f/w,
                                 * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL
@@ -2439,8 +2363,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
                ioc->name,sdev->tagged_supported, sdev->simple_tags,
                sdev->ordered_tags));
 
-       blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
-
        return 0;
 }
 
index 68e4cd7d321ac7715a5bf47420ce1bc0c4bc8639..1eac626e710a38dddab8673b1cc5f751a1df7366 100644 (file)
@@ -284,11 +284,8 @@ enclosure_component_register(struct enclosure_device *edev,
        cdev->groups = enclosure_groups;
 
        err = device_register(cdev);
-       if (err) {
-               ecomp->number = -1;
-               put_device(cdev);
-               return ERR_PTR(err);
-       }
+       if (err)
+               ERR_PTR(err);
 
        return ecomp;
 }
index 55748d6a62658b26926184d4ec55d3b9ec611dbc..65877bc5edaae9e67dd391b665de91670556fd20 100644 (file)
@@ -433,23 +433,18 @@ xpc_discovery(void)
         * nodes that can comprise an access protection grouping. The access
         * protection is in regards to memory, IOI and IPI.
         */
+       max_regions = 64;
        region_size = xp_region_size;
 
-       if (is_uv())
-               max_regions = 256;
-       else {
-               max_regions = 64;
-
-               switch (region_size) {
-               case 128:
-                       max_regions *= 2;
-               case 64:
-                       max_regions *= 2;
-               case 32:
-                       max_regions *= 2;
-                       region_size = 16;
-                       DBUG_ON(!is_shub2());
-               }
+       switch (region_size) {
+       case 128:
+               max_regions *= 2;
+       case 64:
+               max_regions *= 2;
+       case 32:
+               max_regions *= 2;
+               region_size = 16;
+               DBUG_ON(!is_shub2());
        }
 
        for (region = 0; region < max_regions; region++) {
index 8e08d71df1048b252fa2274b7925795ae839dc05..c76677afda1b50591b855759fa711253d99458ee 100644 (file)
@@ -203,7 +203,6 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
        enum xp_retval xp_ret;
        int ret;
        int nid;
-       int nasid;
        int pg_order;
        struct page *page;
        struct xpc_gru_mq_uv *mq;
@@ -259,11 +258,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
                goto out_5;
        }
 
-       nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
-
        mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
        ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
-                                    nasid, mmr_value->vector, mmr_value->dest);
+                                      nid, mmr_value->vector, mmr_value->dest);
        if (ret != 0) {
                dev_err(xpc_part, "gru_create_message_queue() returned "
                        "error=%d\n", ret);
@@ -412,7 +409,6 @@ xpc_process_activate_IRQ_rcvd_uv(void)
 static void
 xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
                              struct xpc_activate_mq_msghdr_uv *msg_hdr,
-                             int part_setup,
                              int *wakeup_hb_checker)
 {
        unsigned long irq_flags;
@@ -477,9 +473,6 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
                struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
 
-               if (!part_setup)
-                       break;
-
                msg = container_of(msg_hdr, struct
                                   xpc_activate_mq_msg_chctl_closerequest_uv,
                                   hdr);
@@ -496,9 +489,6 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
                struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
 
-               if (!part_setup)
-                       break;
-
                msg = container_of(msg_hdr, struct
                                   xpc_activate_mq_msg_chctl_closereply_uv,
                                   hdr);
@@ -513,9 +503,6 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
                struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
 
-               if (!part_setup)
-                       break;
-
                msg = container_of(msg_hdr, struct
                                   xpc_activate_mq_msg_chctl_openrequest_uv,
                                   hdr);
@@ -533,9 +520,6 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
                struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
 
-               if (!part_setup)
-                       break;
-
                msg = container_of(msg_hdr, struct
                                   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
                args = &part->remote_openclose_args[msg->ch_number];
@@ -553,9 +537,6 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
                struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
 
-               if (!part_setup)
-                       break;
-
                msg = container_of(msg_hdr, struct
                                xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
                spin_lock_irqsave(&part->chctl_lock, irq_flags);
@@ -632,7 +613,6 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
 
                        part_referenced = xpc_part_ref(part);
                        xpc_handle_activate_mq_msg_uv(part, msg_hdr,
-                                                     part_referenced,
                                                      &wakeup_hb_checker);
                        if (part_referenced)
                                xpc_part_deref(part);
@@ -965,13 +945,11 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
                head->first = first->next;
                if (head->first == NULL)
                        head->last = NULL;
-
-               head->n_entries--;
-               BUG_ON(head->n_entries < 0);
-
-               first->next = NULL;
        }
+       head->n_entries--;
+       BUG_ON(head->n_entries < 0);
        spin_unlock_irqrestore(&head->lock, irq_flags);
+       first->next = NULL;
        return first;
 }
 
@@ -1040,8 +1018,7 @@ xpc_make_first_contact_uv(struct xpc_partition *part)
        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
                                      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
 
-       while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
-                (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
+       while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
 
                dev_dbg(xpc_part, "waiting to make first contact with "
                        "partition %d\n", XPC_PARTID(part));
@@ -1444,6 +1421,7 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
        msg_slot = ch_uv->recv_msg_slots +
            (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
 
+       BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
        BUG_ON(msg_slot->hdr.size != 0);
 
        memcpy(msg_slot, msg, msg->hdr.size);
@@ -1667,6 +1645,8 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
                               sizeof(struct xpc_notify_mq_msghdr_uv));
        if (ret != xpSuccess)
                XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
+
+       msg->hdr.msg_slot_number += ch->remote_nentries;
 }
 
 static struct xpc_arch_operations xpc_arch_ops_uv = {
index 8faa703516b5e3359762b2ae5f724d9c04667434..fc25586b7ee1c94575e5674f89c3382c2a7ffd88 100644 (file)
@@ -530,10 +530,9 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
 {
        struct mmc_data                 *data = host->data;
 
-       if (data)
-               dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
-                            ((data->flags & MMC_DATA_WRITE)
-                             ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+       dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+                    ((data->flags & MMC_DATA_WRITE)
+                     ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
 }
 
 static void atmci_stop_dma(struct atmel_mci *host)
@@ -1038,8 +1037,8 @@ static void atmci_command_complete(struct atmel_mci *host,
                        "command error: status=0x%08x\n", status);
 
                if (cmd->data) {
-                       atmci_stop_dma(host);
                        host->data = NULL;
+                       atmci_stop_dma(host);
                        mci_writel(host, IDR, MCI_NOTBUSY
                                        | MCI_TXRDY | MCI_RXRDY
                                        | ATMCI_DATA_ERROR_FLAGS);
@@ -1230,7 +1229,6 @@ static void atmci_tasklet_func(unsigned long priv)
                        } else {
                                data->bytes_xfered = data->blocks * data->blksz;
                                data->error = 0;
-                               mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS);
                        }
 
                        if (!data->stop) {
@@ -1671,13 +1669,13 @@ static int __init atmci_probe(struct platform_device *pdev)
        ret = -ENODEV;
        if (pdata->slot[0].bus_width) {
                ret = atmci_init_slot(host, &pdata->slot[0],
-                               0, MCI_SDCSEL_SLOT_A);
+                               MCI_SDCSEL_SLOT_A, 0);
                if (!ret)
                        nr_slots++;
        }
        if (pdata->slot[1].bus_width) {
                ret = atmci_init_slot(host, &pdata->slot[1],
-                               1, MCI_SDCSEL_SLOT_B);
+                               MCI_SDCSEL_SLOT_B, 1);
                if (!ret)
                        nr_slots++;
        }
index fba147c3f54cb7a313da8ab9197ca06383b488d9..99b74a3510203a2e258d83c9f9f4d9aa4f9db816 100644 (file)
@@ -1178,7 +1178,7 @@ static int s3cmci_card_present(struct mmc_host *mmc)
        struct s3c24xx_mci_pdata *pdata = host->pdata;
        int ret;
 
-       if (pdata->no_detect)
+       if (pdata->gpio_detect == 0)
                return -ENOSYS;
 
        ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
@@ -1361,8 +1361,6 @@ static struct mmc_host_ops s3cmci_ops = {
 static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
        /* This is currently here to avoid a number of if (host->pdata)
         * checks. Any zero fields to ensure reaonable defaults are picked. */
-       .no_wprotect = 1,
-       .no_detect = 1,
 };
 
 #ifdef CONFIG_CPU_FREQ
index 14c5480e059c969e0189df22bb73c3d67b17cc7b..50997d2a63e7effcd533d9d95fea45b9ebf34efc 100644 (file)
@@ -372,28 +372,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
 
 static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
 {
-       struct sdhci_host *host =  platform_get_drvdata(pdev);
-       struct sdhci_s3c *sc = sdhci_priv(host);
-       int ptr;
-
-       sdhci_remove_host(host, 1);
-
-       for (ptr = 0; ptr < 3; ptr++) {
-               if (sc->clk_bus[ptr]) {
-                       clk_disable(sc->clk_bus[ptr]);
-                       clk_put(sc->clk_bus[ptr]);
-               }
-       }
-       clk_disable(sc->clk_io);
-       clk_put(sc->clk_io);
-
-       iounmap(host->ioaddr);
-       release_resource(sc->ioarea);
-       kfree(sc->ioarea);
-
-       sdhci_free_host(host);
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index f43edfd064c147c0bcfe9f36a7b1e5a90a2bf61e..91991b460c4547c7a90a583ca987f087c6babd53 100644 (file)
@@ -161,7 +161,6 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
        struct mmc_data *data = host->data;
-       void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;
@@ -171,8 +170,8 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
                return;
        }
 
-       sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
-       buf = (unsigned short *)(sg_virt + host->sg_off);
+       buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
+             host->sg_off);
 
        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
@@ -189,7 +188,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 
        host->sg_off += count;
 
-       tmio_mmc_kunmap_atomic(sg_virt, &flags);
+       tmio_mmc_kunmap_atomic(host, &flags);
 
        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
index ee8fa89b2b57b2b194e8ae442eee12d8716e3141..9fa9985949743be7920eccb0a1627036ea8dcc37 100644 (file)
 
 #define ack_mmc_irqs(host, i) \
        do { \
-               sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
+               u32 mask;\
+               mask  = sd_ctrl_read32((host), CTL_STATUS); \
+               mask &= ~((i) & TMIO_MASK_IRQ); \
+               sd_ctrl_write32((host), CTL_STATUS, mask); \
        } while (0)
 
 
@@ -197,17 +200,19 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
        return --host->sg_len;
 }
 
-static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
+static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
        unsigned long *flags)
 {
+       struct scatterlist *sg = host->sg_ptr;
+
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
 }
 
-static inline void tmio_mmc_kunmap_atomic(void *virt,
+static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
        unsigned long *flags)
 {
-       kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+       kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
        local_irq_restore(*flags);
 }
 
index 776183fb2b59c59d3a6edbf718e92598b59bb799..6ea520ae2410a9113690641670d488321ac3da23 100644 (file)
@@ -316,7 +316,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
 #define tAR_NDTR1(r)   (((r) >> 0) & 0xf)
 
 /* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk)      (int)((ns) * (clk / 1000000) / 1000)
+#define ns2cycle(ns, clk)      (int)(((ns) * (clk / 1000000) / 1000) - 1)
 
 /* convert nand flash controller clock cycles to nano-seconds */
 #define cycle2ns(c, clk)       ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
index 9fb3a0bd994a10f2ce1a63afe355711fc67e65b2..c71e12d05f6ebe25715f5ee9784568aadb1d5f80 100644 (file)
@@ -380,12 +380,6 @@ out:
     return retval;
 }
 
-static irqreturn_t el2_probe_interrupt(int irq, void *seen)
-{
-       *(bool *)seen = true;
-       return IRQ_HANDLED;
-}
-
 static int
 el2_open(struct net_device *dev)
 {
@@ -397,35 +391,22 @@ el2_open(struct net_device *dev)
 
        outb(EGACFR_NORM, E33G_GACFR);  /* Enable RAM and interrupts. */
        do {
-               bool seen;
-
-               retval = request_irq(*irqp, el2_probe_interrupt, 0,
-                                    dev->name, &seen);
-               if (retval == -EBUSY)
-                       continue;
-               if (retval < 0)
-                       goto err_disable;
-
+           retval = request_irq(*irqp, NULL, 0, "bogus", dev);
+           if (retval >= 0) {
                /* Twinkle the interrupt, and check if it's seen. */
-               seen = false;
-               smp_wmb();
+               unsigned long cookie = probe_irq_on();
                outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
                outb_p(0x00, E33G_IDCFR);
-               msleep(1);
-               free_irq(*irqp, el2_probe_interrupt);
-               if (!seen)
-                       continue;
-
-               retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
-                                    dev->name, dev);
-               if (retval == -EBUSY)
-                       continue;
-               if (retval < 0)
-                       goto err_disable;
+               if (*irqp == probe_irq_off(cookie)      /* It's a good IRQ line! */
+                   && ((retval = request_irq(dev->irq = *irqp,
+                   eip_interrupt, 0, dev->name, dev)) == 0))
+                   break;
+           } else {
+                   if (retval != -EBUSY)
+                           return retval;
+           }
        } while (*++irqp);
-
        if (*irqp == 0) {
-       err_disable:
            outb(EGACFR_IRQOFF, E33G_GACFR);    /* disable interrupts. */
            return -EAGAIN;
        }
index 69698e504f3d019f0b2cd3ac66887118af040b63..dbf4de39754d1f4009edfdb02f4bfa1e80e56baa 100644 (file)
@@ -165,8 +165,8 @@ static struct pci_device_id com20020pci_id_table[] = {
        { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
        { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
        { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
-       { 0x10B5, 0x9030, 0x10B5,     0x2978,     0, 0, ARC_CAN_10MBIT },
-       { 0x10B5, 0x9050, 0x10B5,     0x2273,     0, 0, ARC_CAN_10MBIT },
+       { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+       { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
        { 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
        { 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
        {0,}
index b0fb7254e2cb75b7839ee334361576356d441ac9..60edb9f232bb8a6d8ce0623f7346ddab5fe3ef52 100644 (file)
@@ -394,13 +394,11 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
        .get_eeprom             = atl1e_get_eeprom,
        .set_eeprom             = atl1e_set_eeprom,
        .get_tx_csum            = atl1e_get_tx_csum,
-       .set_tx_csum            = ethtool_op_set_tx_hw_csum,
        .get_sg                 = ethtool_op_get_sg,
        .set_sg                 = ethtool_op_set_sg,
 #ifdef NETIF_F_TSO
        .get_tso                = ethtool_op_get_tso,
 #endif
-       .set_tso                = ethtool_op_set_tso,
 };
 
 void atl1e_set_ethtool_ops(struct net_device *netdev)
index 403bfb6d13ee2cb6d982b6f7a0a0893ddbe16ecc..00569dc1313cd5c2a1f7ead4603ac8deb7bec4ff 100644 (file)
@@ -2856,11 +2856,10 @@ static int atl1_resume(struct pci_dev *pdev)
        pci_enable_wake(pdev, PCI_D3cold, 0);
 
        atl1_reset_hw(&adapter->hw);
+       adapter->cmb.cmb->int_stats = 0;
 
-       if (netif_running(netdev)) {
-               adapter->cmb.cmb->int_stats = 0;
+       if (netif_running(netdev))
                atl1_up(adapter);
-       }
        netif_device_attach(netdev);
 
        return 0;
index 137cb031df6cdc9b36e97abc06851cd62124e943..4869adb695865a3f7dc4ff3bacd1ee5a260b296d 100644 (file)
@@ -2175,6 +2175,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
        dev->irq = sdev->irq;
        SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
 
+       netif_carrier_off(dev);
+
        err = ssb_bus_powerup(sdev->bus, 0);
        if (err) {
                dev_err(sdev->dev,
@@ -2214,8 +2216,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
                goto err_out_powerdown;
        }
 
-       netif_carrier_off(dev);
-
        ssb_set_drvdata(sdev, dev);
 
        /* Chip reset provides power to the b44 MAC & PCI cores, which
index 4874b2bd6bbd6ffc501fc7263486b9bf705e332a..08cddb6ff740f4dfac35677e91424378bcf3b2e9 100644 (file)
@@ -247,9 +247,6 @@ static const struct flash_spec flash_5709 = {
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
-static void bnx2_init_napi(struct bnx2 *bp);
-static void bnx2_del_napi(struct bnx2 *bp);
-
 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
        u32 diff;
@@ -4755,12 +4752,8 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
                rc = bnx2_alloc_bad_rbuf(bp);
        }
 
-       if (bp->flags & BNX2_FLAG_USING_MSIX) {
+       if (bp->flags & BNX2_FLAG_USING_MSIX)
                bnx2_setup_msix_tbl(bp);
-               /* Prevent MSIX table reads and write from timing out */
-               REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
-                       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
-       }
 
        return rc;
 }
@@ -6176,7 +6169,6 @@ bnx2_open(struct net_device *dev)
        bnx2_disable_int(bp);
 
        bnx2_setup_int_mode(bp, disable_msi);
-       bnx2_init_napi(bp);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
@@ -6238,7 +6230,6 @@ open_err:
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
-       bnx2_del_napi(bp);
        return rc;
 }
 
@@ -6446,7 +6437,6 @@ bnx2_close(struct net_device *dev)
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
-       bnx2_del_napi(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
@@ -8022,21 +8012,12 @@ bnx2_bus_string(struct bnx2 *bp, char *str)
        return str;
 }
 
-static void
-bnx2_del_napi(struct bnx2 *bp)
-{
-       int i;
-
-       for (i = 0; i < bp->irq_nvecs; i++)
-               netif_napi_del(&bp->bnx2_napi[i].napi);
-}
-
-static void
+static void __devinit
 bnx2_init_napi(struct bnx2 *bp)
 {
        int i;
 
-       for (i = 0; i < bp->irq_nvecs; i++) {
+       for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                int (*poll)(struct napi_struct *, int);
 
@@ -8105,6 +8086,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->ethtool_ops = &bnx2_ethtool_ops;
 
        bp = netdev_priv(dev);
+       bnx2_init_napi(bp);
 
        pci_set_drvdata(pdev, dev);
 
index d3854ac22cbf3cb3fda6ec33c19c3dbe7a545c55..c3fa31c9f2a7440f893f60ba10e6007ac9150251 100644 (file)
@@ -2451,9 +2451,6 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
        if (!(dev->flags & IFF_MASTER))
                goto out;
 
-       if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
-               goto out;
-
        read_lock(&bond->lock);
        slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
                                        orig_dev);
index 71143751d8fd52a1ed78bc9c58a7a6f6ac03780d..9b5936f072dcc48232e9dd4e9c580652ad6ca0dc 100644 (file)
@@ -370,9 +370,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
                goto out;
        }
 
-       if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
-               goto out;
-
        if (skb->len < sizeof(struct arp_pkt)) {
                pr_debug("Packet is too small to be an ARP\n");
                goto out;
index 9463e5db95666eb7b6838e4da045a7f3c2026557..16d2ecd2a3b7b4fa08d7ce95c8e342cdc3788856 100644 (file)
@@ -84,20 +84,6 @@ static struct can_bittiming_const sja1000_bittiming_const = {
        .brp_inc = 1,
 };
 
-static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
-{
-       unsigned long flags;
-
-       /*
-        * The command register needs some locking and time to settle
-        * the write_reg() operation - especially on SMP systems.
-        */
-       spin_lock_irqsave(&priv->cmdreg_lock, flags);
-       priv->write_reg(priv, REG_CMR, val);
-       priv->read_reg(priv, REG_SR);
-       spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
-}
-
 static int sja1000_probe_chip(struct net_device *dev)
 {
        struct sja1000_priv *priv = netdev_priv(dev);
@@ -293,7 +279,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
 
        can_put_echo_skb(skb, dev, 0);
 
-       sja1000_write_cmdreg(priv, CMD_TR);
+       priv->write_reg(priv, REG_CMR, CMD_TR);
 
        return NETDEV_TX_OK;
 }
@@ -348,7 +334,7 @@ static void sja1000_rx(struct net_device *dev)
                cf->data[i++] = 0;
 
        /* release receive buffer */
-       sja1000_write_cmdreg(priv, CMD_RRB);
+       priv->write_reg(priv, REG_CMR, CMD_RRB);
 
        netif_rx(skb);
 
@@ -382,7 +368,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
                cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
                stats->rx_over_errors++;
                stats->rx_errors++;
-               sja1000_write_cmdreg(priv, CMD_CDO);    /* clear bit */
+               priv->write_reg(priv, REG_CMR, CMD_CDO);        /* clear bit */
        }
 
        if (isrc & IRQ_EI) {
index cfd3f57e4ab53076b5c97e32a4f1bf5cd8298aeb..302d2c763ad7cfb3c2a18da3a2911e2cf786b876 100644 (file)
@@ -165,7 +165,6 @@ struct sja1000_priv {
 
        void __iomem *reg_base;  /* ioremap'ed address to registers */
        unsigned long irq_flags; /* for request_irq() */
-       spinlock_t cmdreg_lock;  /* lock for concurrent cmd register writes */
 
        u16 flags;              /* custom mode flags */
        u8 ocr;                 /* output control register */
index 1cace005bff202ae4ff0c1d3dfb7dc81d4f0cb3f..61f9da2b49431d88056daa1bbcb4b6bceaee86b7 100644 (file)
@@ -1176,8 +1176,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Could not attach to PHY\n",
                               dev->name);
-               rc = PTR_ERR(priv->phy);
-               goto fail;
+               return PTR_ERR(priv->phy);
        }
 
        if ((rc = register_netdev(dev))) {
index 35cd36729155ee18a7dcd44d883890a980c0b87d..5248f9e0b2f4c4a934a78cf8209381da57077978 100644 (file)
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = {
 int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
                            int phy_addr, const struct mdio_ops *mdio_ops)
 {
-       cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
+       cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops,
                  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
                  "10GBASE-CX4");
        return 0;
index 2b378e75b1b02389a848dc4e76542b09bd13e4df..34e776c5f06b4391e1a7bf42ff1cffb72c8b9ce7 100644 (file)
@@ -1274,7 +1274,6 @@ static void cxgb_down(struct adapter *adapter)
 
        free_irq_resources(adapter);
        quiesce_rx(adapter);
-       t3_sge_stop(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
 }
 
@@ -2275,8 +2274,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
        case CHELSIO_GET_QSET_NUM:{
                struct ch_reg edata;
 
-               memset(&edata, 0, sizeof(struct ch_reg));
-
                edata.cmd = CHELSIO_GET_QSET_NUM;
                edata.val = pi->nqsets;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
index 3a80d0a3418e0a6ab8a58b84cbbed6069b020c21..52cbc2f95608369767b87af6b79096175d485591 100755 (executable)
@@ -471,9 +471,10 @@ static uint32_t dm9000_get_rx_csum(struct net_device *dev)
        return dm->rx_csum;
 }
 
-static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
+static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
 {
        board_info_t *dm = to_dm9000_board(dev);
+       unsigned long flags;
 
        if (dm->can_csum) {
                dm->rx_csum = data;
@@ -485,19 +486,6 @@ static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
        return -EOPNOTSUPP;
 }
 
-static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
-{
-       board_info_t *dm = to_dm9000_board(dev);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&dm->lock, flags);
-       ret = dm9000_set_rx_csum_unlocked(dev, data);
-       spin_unlock_irqrestore(&dm->lock, flags);
-
-       return ret;
-}
-
 static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
 {
        board_info_t *dm = to_dm9000_board(dev);
@@ -676,7 +664,7 @@ static unsigned char dm9000_type_to_char(enum dm9000_type type)
  *  Set DM9000 multicast address
  */
 static void
-dm9000_hash_table_unlocked(struct net_device *dev)
+dm9000_hash_table(struct net_device *dev)
 {
        board_info_t *db = netdev_priv(dev);
        struct dev_mc_list *mcptr = dev->mc_list;
@@ -685,9 +673,11 @@ dm9000_hash_table_unlocked(struct net_device *dev)
        u32 hash_val;
        u16 hash_table[4];
        u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+       unsigned long flags;
 
        dm9000_dbg(db, 1, "entering %s\n", __func__);
        
+       spin_lock_irqsave(&db->lock, flags);
        for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
                iow(db, oft, dev->dev_addr[i]);
 
@@ -717,16 +707,6 @@ dm9000_hash_table_unlocked(struct net_device *dev)
        }
 
        iow(db, DM9000_RCR, rcr);
-}
-
-static void
-dm9000_hash_table(struct net_device *dev)
-{
-       board_info_t *db = netdev_priv(dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&db->lock, flags);
-       dm9000_hash_table_unlocked(dev);
        spin_unlock_irqrestore(&db->lock, flags);
 }
 
@@ -745,7 +725,7 @@ dm9000_init_dm9000(struct net_device *dev)
        db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
 
        /* Checksum mode */
-       dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
+       dm9000_set_rx_csum(dev, db->rx_csum);
 
        /* GPIO0 on pre-activate PHY */
        iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
@@ -765,7 +745,7 @@ dm9000_init_dm9000(struct net_device *dev)
        iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
 
        /* Set address filter table */
-       dm9000_hash_table_unlocked(dev);
+       dm9000_hash_table(dev);
 
        imr = IMR_PAR | IMR_PTM | IMR_PRM;
        if (db->type != TYPE_DM9000E)
index 11f3b7c7422fc1a5b2c9a3bbdd5716f73574745e..aaea41ef794dc1ee1fe23d124a4192b058e146e5 100644 (file)
@@ -304,7 +304,7 @@ enum e1e_registers {
 #define E1000_KMRNCTRLSTA_DIAG_OFFSET  0x3    /* Kumeran Diagnostic */
 #define E1000_KMRNCTRLSTA_DIAG_NELPBK  0x1000 /* Nearend Loopback mode */
 #define E1000_KMRNCTRLSTA_K1_CONFIG    0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE    0x0002
+#define E1000_KMRNCTRLSTA_K1_ENABLE    0x140E
 #define E1000_KMRNCTRLSTA_K1_DISABLE   0x1400
 
 #define IFE_PHY_EXTENDED_STATUS_CONTROL        0x10
@@ -356,7 +356,6 @@ enum e1e_registers {
 #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT    0x10BA
 #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT    0x10BB
 
-#define E1000_DEV_ID_ICH8_82567V_3             0x1501
 #define E1000_DEV_ID_ICH8_IGP_M_AMT            0x1049
 #define E1000_DEV_ID_ICH8_IGP_AMT              0x104A
 #define E1000_DEV_ID_ICH8_IGP_C                        0x104B
index c688b55c1b7564adc002f18a5da93bf5170f9a70..eff3f478365556bf00a5ed996a398bf051190105 100644 (file)
@@ -3209,7 +3209,6 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
        u32 phy_ctrl;
 
        switch (hw->mac.type) {
-       case e1000_ich8lan:
        case e1000_ich9lan:
        case e1000_ich10lan:
        case e1000_pchlan:
index d177a02dd20103f4859e546467b57546ef086d95..21545306bc1d433d6b0decf2325f0a1481a4d3b1 100644 (file)
@@ -665,8 +665,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                                i = 0;
                }
 
-               if (i == tx_ring->next_to_use)
-                       break;
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }
@@ -3073,18 +3071,13 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
 
        /* disable SERR in case the MSI write causes a master abort */
        pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
-       if (pci_cmd & PCI_COMMAND_SERR)
-               pci_write_config_word(adapter->pdev, PCI_COMMAND,
-                                     pci_cmd & ~PCI_COMMAND_SERR);
+       pci_write_config_word(adapter->pdev, PCI_COMMAND,
+                             pci_cmd & ~PCI_COMMAND_SERR);
 
        err = e1000_test_msi_interrupt(adapter);
 
-       /* re-enable SERR */
-       if (pci_cmd & PCI_COMMAND_SERR) {
-               pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
-               pci_cmd |= PCI_COMMAND_SERR;
-               pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
-       }
+       /* restore previous setting of command word */
+       pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
 
        /* success ! */
        if (!err)
@@ -5367,7 +5360,6 @@ static struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
 
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
index fd57fb421e912af88a39a54f253b4007e7e66079..f5b96cadeb2544994c4aadeeb45f30113c082116 100644 (file)
@@ -554,8 +554,6 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
        equalizer_t *eql;
        master_config_t mc;
 
-       memset(&mc, 0, sizeof(master_config_t));
-
        if (eql_is_master(dev)) {
                eql = netdev_priv(dev);
                mc.max_slaves = eql->max_slaves;
index 7cd446d0f51a96793c463dfc2ff1923407b22145..3116601dbfea859984fa480edd0ecd4db5893e4e 100644 (file)
@@ -5900,7 +5900,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        /* Limit the number of tx's outstanding for hw bug */
        if (id->driver_data & DEV_NEED_TX_LIMIT) {
                np->tx_limit = 1;
-               if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
+               if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
                    pci_dev->revision >= 0xA2)
                        np->tx_limit = 0;
        }
index 934a28fde6118b99fa8aa959df9d972e302fd32b..5bf31f1509c93acca119f3e87175f97505bd4d31 100644 (file)
@@ -1621,7 +1621,7 @@ static int gfar_clean_tx_ring(struct net_device *dev)
                if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
                                skb_recycle_check(skb, priv->rx_buffer_size +
                                        RXBUF_ALIGNMENT))
-                       skb_queue_head(&priv->rx_recycle, skb);
+                       __skb_queue_head(&priv->rx_recycle, skb);
                else
                        dev_kfree_skb_any(skb);
 
@@ -1703,7 +1703,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
 
-       skb = skb_dequeue(&priv->rx_recycle);
+       skb = __skb_dequeue(&priv->rx_recycle);
        if (!skb)
                skb = netdev_alloc_skb(dev,
                                priv->rx_buffer_size + RXBUF_ALIGNMENT);
@@ -1862,7 +1862,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
                                 * recycle list.
                                 */
                                skb->data = skb->head + NET_SKB_PAD;
-                               skb_queue_head(&priv->rx_recycle, skb);
+                               __skb_queue_head(&priv->rx_recycle, skb);
                        }
                } else {
                        /* Increment the number of packets */
index 33352ffa9669cb33bc25b16542bb4589f4d526a4..f8f5772557cefa661d864d0e0dfa1fde6c467ac0 100644 (file)
@@ -81,7 +81,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                break;
        case E1000_DEV_ID_82576:
        case E1000_DEV_ID_82576_NS:
-       case E1000_DEV_ID_82576_NS_SERDES:
        case E1000_DEV_ID_82576_FIBER:
        case E1000_DEV_ID_82576_SERDES:
        case E1000_DEV_ID_82576_QUAD_COPPER:
@@ -1168,18 +1167,9 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
 
-       /*
-        * If there's an alternate MAC address place it in RAR0
-        * so that it will override the Si installed default perm
-        * address.
-        */
-       ret_val = igb_check_alt_mac_addr(hw);
-       if (ret_val)
-               goto out;
-
-       ret_val = igb_read_mac_addr(hw);
+       if (igb_check_alt_mac_addr(hw))
+               ret_val = igb_read_mac_addr(hw);
 
-out:
        return ret_val;
 }
 
index 72081df3a39714b7e99a37b8e91ef32e808347c4..119869b1124dd0c7495e16da522fc8fee2748702 100644 (file)
@@ -42,7 +42,6 @@ struct e1000_hw;
 #define E1000_DEV_ID_82576_SERDES             0x10E7
 #define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
 #define E1000_DEV_ID_82576_NS                 0x150A
-#define E1000_DEV_ID_82576_NS_SERDES          0x1518
 #define E1000_DEV_ID_82576_SERDES_QUAD        0x150D
 #define E1000_DEV_ID_82575EB_COPPER           0x10A7
 #define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
@@ -53,8 +52,6 @@ struct e1000_hw;
 
 #define E1000_FUNC_1     1
 
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
-
 enum e1000_mac_type {
        e1000_undefined = 0,
        e1000_82575,
index d4fa82c45fb943a05848a9690a5cd2c7a286e203..7d76bb085e105923080bb8304209f0874c5eeff6 100644 (file)
@@ -185,12 +185,13 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
        }
 
        if (nvm_alt_mac_addr_offset == 0xFFFF) {
-               /* There is no Alternate MAC Address */
+               ret_val = -(E1000_NOT_IMPLEMENTED);
                goto out;
        }
 
        if (hw->bus.func == E1000_FUNC_1)
-               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+               nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16);
+
        for (i = 0; i < ETH_ALEN; i += 2) {
                offset = nvm_alt_mac_addr_offset + (i >> 1);
                ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
@@ -205,16 +206,14 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
 
        /* if multicast bit is set, the alternate address will not be used */
        if (alt_mac_addr[0] & 0x01) {
-               hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+               ret_val = -(E1000_NOT_IMPLEMENTED);
                goto out;
        }
 
-       /*
-        * We have a valid alternate MAC address, and we want to treat it the
-        * same as the normal permanent MAC address stored by the HW into the
-        * RAR. Do this by mapping this address into RAR0.
-        */
-       hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+       for (i = 0; i < ETH_ALEN; i++)
+               hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
+
+       hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);
 
 out:
        return ret_val;
index 8111776927ae30ffcbc60ffb56daebbe80f744a0..714c3a4a44eff5a1d43b538a6c6cf380bfea445c 100644 (file)
@@ -63,7 +63,6 @@ static const struct e1000_info *igb_info_tbl[] = {
 static struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
-       { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
index 9c4214993b8232b5e230f41ca52dd54900121edd..34b04924c8a1f6412f7231306833c3ceebafbe63 100644 (file)
@@ -332,7 +332,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599_KX4:
        case IXGBE_DEV_ID_82599_KX4_MEZZ:
        case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-       case IXGBE_DEV_ID_82599_KR:
        case IXGBE_DEV_ID_82599_XAUI_LOM:
                /* Default device ID is mezzanine card KX/KX4 */
                media_type = ixgbe_media_type_backplane;
index a873c5d7931a5e784b09a8786a169ad42ddd8eef..a456578b85786da64b32c75d98fafbe169b12893 100644 (file)
@@ -96,8 +96,6 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
         board_82599 },
-       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
-        board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
@@ -5241,13 +5239,9 @@ static int ixgbe_maybe_stop_tx(struct net_device *netdev,
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
-       int txq = smp_processor_id();
 
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-               while (unlikely(txq >= dev->real_num_tx_queues))
-                       txq -= dev->real_num_tx_queues;
-               return txq;
-       }
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+               return smp_processor_id();
 
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
index 7d66f5bbbec9c6d0ca4662f1ff624aa9b09881bb..ef4bdd58e0162e96c1bfad79980d2fd3dc3a73bb 100644 (file)
@@ -50,7 +50,6 @@
 #define IXGBE_DEV_ID_82598EB_XF_LR       0x10F4
 #define IXGBE_DEV_ID_82599_KX4           0x10F7
 #define IXGBE_DEV_ID_82599_KX4_MEZZ      0x1514
-#define IXGBE_DEV_ID_82599_KR            0x1517
 #define IXGBE_DEV_ID_82599_CX4           0x10F9
 #define IXGBE_DEV_ID_82599_SFP           0x10FB
 #define IXGBE_DEV_ID_82599_XAUI_LOM      0x10FC
index a893f45db817498cdc4251e2bf955472fd88dd15..1d2a32544ed2d50189d53c7197f976834a5983e3 100644 (file)
@@ -946,8 +946,6 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
                                jme->jme_vlan_rx(skb, jme->vlgrp,
                                        le16_to_cpu(rxdesc->descwb.vlan));
                                NET_STAT(jme).rx_bytes += 4;
-                       } else {
-                               dev_kfree_skb(skb);
                        }
                } else {
                        jme->jme_rx(skb);
@@ -1578,16 +1576,6 @@ jme_free_irq(struct jme_adapter *jme)
        }
 }
 
-static inline void
-jme_phy_on(struct jme_adapter *jme)
-{
-       u32 bmcr;
-
-       bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
-       bmcr &= ~BMCR_PDOWN;
-       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
-}
-
 static int
 jme_open(struct net_device *netdev)
 {
@@ -1608,12 +1596,10 @@ jme_open(struct net_device *netdev)
 
        jme_start_irq(jme);
 
-       if (test_bit(JME_FLAG_SSET, &jme->flags)) {
-               jme_phy_on(jme);
+       if (test_bit(JME_FLAG_SSET, &jme->flags))
                jme_set_settings(netdev, &jme->old_ecmd);
-       } else {
+       else
                jme_reset_phy_processor(jme);
-       }
 
        jme_reset_link(jme);
 
@@ -2099,45 +2085,12 @@ jme_tx_timeout(struct net_device *netdev)
        jme_reset_link(jme);
 }
 
-static inline void jme_pause_rx(struct jme_adapter *jme)
-{
-       atomic_dec(&jme->link_changing);
-
-       jme_set_rx_pcc(jme, PCC_OFF);
-       if (test_bit(JME_FLAG_POLL, &jme->flags)) {
-               JME_NAPI_DISABLE(jme);
-       } else {
-               tasklet_disable(&jme->rxclean_task);
-               tasklet_disable(&jme->rxempty_task);
-       }
-}
-
-static inline void jme_resume_rx(struct jme_adapter *jme)
-{
-       struct dynpcc_info *dpi = &(jme->dpi);
-
-       if (test_bit(JME_FLAG_POLL, &jme->flags)) {
-               JME_NAPI_ENABLE(jme);
-       } else {
-               tasklet_hi_enable(&jme->rxclean_task);
-               tasklet_hi_enable(&jme->rxempty_task);
-       }
-       dpi->cur                = PCC_P1;
-       dpi->attempt            = PCC_P1;
-       dpi->cnt                = 0;
-       jme_set_rx_pcc(jme, PCC_P1);
-
-       atomic_inc(&jme->link_changing);
-}
-
 static void
 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
 
-       jme_pause_rx(jme);
        jme->vlgrp = grp;
-       jme_resume_rx(jme);
 }
 
 static void
@@ -3025,12 +2978,10 @@ jme_resume(struct pci_dev *pdev)
        jme_clear_pm(jme);
        pci_restore_state(pdev);
 
-       if (test_bit(JME_FLAG_SSET, &jme->flags)) {
-               jme_phy_on(jme);
+       if (test_bit(JME_FLAG_SSET, &jme->flags))
                jme_set_settings(netdev, &jme->old_ecmd);
-       } else {
+       else
                jme_reset_phy_processor(jme);
-       }
 
        jme_start_irq(jme);
        netif_device_attach(netdev);
index c0ceebccaa49ce38b75df1c9c30a67ec56c4b7b0..c146304d8d6ca6398b22a3bef00e10416719fe79 100644 (file)
@@ -854,8 +854,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
 
 static irqreturn_t ks_irq(int irq, void *pw)
 {
-       struct net_device *netdev = pw;
-       struct ks_net *ks = netdev_priv(netdev);
+       struct ks_net *ks = pw;
+       struct net_device *netdev = ks->netdev;
        u16 status;
 
        /*this should be the first in IRQ handler */
index 83eef8e35b769cf0ea144326b776f135cb5baec0..04b382fcb8c881c2ab76fea05636deb63c64effd 100644 (file)
@@ -174,10 +174,9 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 
                                if (chunk->nsg <= 0)
                                        goto fail;
-                       }
 
-                       if (chunk->npages == MLX4_ICM_CHUNK_LEN)
                                chunk = NULL;
+                       }
 
                        npages -= 1 << cur_order;
                } else {
index 0f3ae462d4317ec670a72423431eccebf5a4bf04..8a0904368e0838027e7a23621918203ebdd02d5e 100644 (file)
@@ -1199,6 +1199,7 @@ netxen_process_rcv(struct netxen_adapter *adapter,
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
 
+       skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
 
        napi_gro_receive(&sds_ring->napi, skb);
@@ -1260,6 +1261,8 @@ netxen_process_lro(struct netxen_adapter *adapter,
 
        skb_put(skb, lro_length + data_offset);
 
+       skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
+
        skb_pull(skb, l2_hdr_offset);
        skb->protocol = eth_type_trans(skb, netdev);
 
index b724d7faa3c8bad73241c5c6d76f6509f5bd289b..5910df60c93eee6a16705ef350546aef6593107a 100644 (file)
@@ -1178,8 +1178,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        /* Calculate UDP checksum if configured to do so */
        if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
                skb->ip_summed = CHECKSUM_NONE;
-       else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
-                (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
+       else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                csum = skb_checksum(skb, 0, udp_len, 0);
                uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
index 9ee9f01a929b99711c7c19e3dd9227aa525ff3fe..8b14c6eda7c3379e67eda5fac94960e0e538b46b 100644 (file)
 #define RX_DESC_SIZE   (RX_DCNT * sizeof(struct r6040_descriptor))
 #define TX_DESC_SIZE   (TX_DCNT * sizeof(struct r6040_descriptor))
 #define MBCR_DEFAULT   0x012A  /* MAC Bus Control Register */
-#define MCAST_MAX      3       /* Max number multicast addresses to filter */
+#define MCAST_MAX      4       /* Max number multicast addresses to filter */
 
 /* Descriptor status */
 #define DSC_OWNER_MAC  0x8000  /* MAC is the owner of this descriptor */
@@ -985,6 +985,9 @@ static void r6040_multicast_list(struct net_device *dev)
                        crc >>= 26;
                        hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
                }
+               /* Write the index of the hash table */
+               for (i = 0; i < 4; i++)
+                       iowrite16(hash_table[i] << 14, ioaddr + MCR1);
                /* Fill the MAC hash tables with their values */
                iowrite16(hash_table[0], ioaddr + MAR0);
                iowrite16(hash_table[1], ioaddr + MAR1);
@@ -992,7 +995,6 @@ static void r6040_multicast_list(struct net_device *dev)
                iowrite16(hash_table[3], ioaddr + MAR3);
        }
        /* Multicast Address 1~4 case */
-       dmi = dev->mc_list;
        for (i = 0, dmi; (i < dev->mc_count) && (i < MCAST_MAX); i++) {
                adrp = (u16 *)dmi->dmi_addr;
                iowrite16(adrp[0], ioaddr + MID_1L + 8*i);
@@ -1001,9 +1003,9 @@ static void r6040_multicast_list(struct net_device *dev)
                dmi = dmi->next;
        }
        for (i = dev->mc_count; i < MCAST_MAX; i++) {
-               iowrite16(0xffff, ioaddr + MID_1L + 8*i);
-               iowrite16(0xffff, ioaddr + MID_1M + 8*i);
-               iowrite16(0xffff, ioaddr + MID_1H + 8*i);
+               iowrite16(0xffff, ioaddr + MID_0L + 8*i);
+               iowrite16(0xffff, ioaddr + MID_0M + 8*i);
+               iowrite16(0xffff, ioaddr + MID_0H + 8*i);
        }
 }
 
index 7dd213239f2bd56dc47bc70133055161c9c89117..0fe2fc90f207ebdf74d01620b50781fb2d556882 100644 (file)
@@ -186,12 +186,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
 
 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 
-/*
- * we set our copybreak very high so that we don't have
- * to allocate 16k frames all the time (see note in
- * rtl8169_open()
- */
-static int rx_copybreak = 16383;
+static int rx_copybreak = 200;
 static int use_dac;
 static struct {
        u32 msg_enable;
@@ -557,11 +552,6 @@ static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
                        break;
                udelay(25);
        }
-       /*
-        * According to hardware specs a 20us delay is required after write
-        * complete indication, but before sending next command.
-        */
-       udelay(20);
 }
 
 static int mdio_read(void __iomem *ioaddr, int reg_addr)
@@ -581,12 +571,6 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
                }
                udelay(25);
        }
-       /*
-        * According to hardware specs a 20us delay is required after read
-        * complete indication, but before sending next command.
-        */
-       udelay(20);
-
        return value;
 }
 
@@ -2843,13 +2827,8 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
        spin_lock_irq(&tp->lock);
 
        RTL_W8(Cfg9346, Cfg9346_Unlock);
-
-       RTL_W32(MAC4, high);
-       RTL_R32(MAC4);
-
        RTL_W32(MAC0, low);
-       RTL_R32(MAC0);
-
+       RTL_W32(MAC4, high);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        spin_unlock_irq(&tp->lock);
@@ -3266,13 +3245,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 }
 
 static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
-                                 unsigned int mtu)
+                                 struct net_device *dev)
 {
-       unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
-
-       if (max_frame != 16383)
-               printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
-                       "NIC may lead to frame reception errors!\n");
+       unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
 
        tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
 }
@@ -3284,17 +3259,7 @@ static int rtl8169_open(struct net_device *dev)
        int retval = -ENOMEM;
 
 
-       /*
-        * Note that we use a magic value here, its wierd I know
-        * its done because, some subset of rtl8169 hardware suffers from
-        * a problem in which frames received that are longer than
-        * the size set in RxMaxSize register return garbage sizes
-        * when received.  To avoid this we need to turn off filtering,
-        * which is done by setting a value of 16383 in the RxMaxSize register
-        * and allocating 16k frames to handle the largest possible rx value
-        * thats what the magic math below does.
-        */
-       rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
+       rtl8169_set_rxbufsize(tp, dev);
 
        /*
         * Rx and Tx desscriptors needs 256 bytes alignment.
@@ -3947,7 +3912,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
 
        rtl8169_down(dev);
 
-       rtl8169_set_rxbufsize(tp, dev->mtu);
+       rtl8169_set_rxbufsize(tp, dev);
 
        ret = rtl8169_init_ring(dev);
        if (ret < 0)
@@ -3999,7 +3964,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
 static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
                                            struct net_device *dev,
                                            struct RxDesc *desc, int rx_buf_sz,
-                                           unsigned int align, gfp_t gfp)
+                                           unsigned int align)
 {
        struct sk_buff *skb;
        dma_addr_t mapping;
@@ -4007,7 +3972,7 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
 
        pad = align ? align : NET_IP_ALIGN;
 
-       skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
+       skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
        if (!skb)
                goto err_out;
 
@@ -4038,7 +4003,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
 }
 
 static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
-                          u32 start, u32 end, gfp_t gfp)
+                          u32 start, u32 end)
 {
        u32 cur;
 
@@ -4053,7 +4018,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
 
                skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
                                           tp->RxDescArray + i,
-                                          tp->rx_buf_sz, tp->align, gfp);
+                                          tp->rx_buf_sz, tp->align);
                if (!skb)
                        break;
 
@@ -4081,7 +4046,7 @@ static int rtl8169_init_ring(struct net_device *dev)
        memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
        memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
 
-       if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
+       if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
                goto err_out;
 
        rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@ -4332,7 +4297,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        tp->cur_tx += frags + 1;
 
-       wmb();
+       smp_wmb();
 
        RTL_W8(TxPoll, NPQ);    /* set polling bit */
 
@@ -4584,7 +4549,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
        count = cur_rx - tp->cur_rx;
        tp->cur_rx = cur_rx;
 
-       delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
+       delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
        if (!delta && count && netif_msg_intr(tp))
                printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
        tp->dirty_rx += delta;
@@ -4692,7 +4657,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
                 * until it does.
                 */
                tp->intr_mask = 0xffff;
-               wmb();
+               smp_wmb();
                RTL_W16(IntrMask, tp->intr_event);
        }
 
@@ -4830,8 +4795,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
                mc_filter[1] = swab32(data);
        }
 
-       RTL_W32(MAR0 + 4, mc_filter[1]);
        RTL_W32(MAR0 + 0, mc_filter[0]);
+       RTL_W32(MAR0 + 4, mc_filter[1]);
 
        RTL_W32(RxConfig, tmp);
 
index 5b07e002f4e9079aeab31b92967c679a390dbdd1..8f5414348e8629569a6fa12ff08f710d425abe6a 100644 (file)
@@ -40,7 +40,6 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/mii.h>
-#include <linux/dmi.h>
 #include <asm/irq.h>
 
 #include "skge.h"
@@ -3891,8 +3890,6 @@ static void __devinit skge_show_addr(struct net_device *dev)
                       dev->name, dev->dev_addr);
 }
 
-static int only_32bit_dma;
-
 static int __devinit skge_probe(struct pci_dev *pdev,
                                const struct pci_device_id *ent)
 {
@@ -3914,7 +3911,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
@@ -4171,21 +4168,8 @@ static struct pci_driver skge_driver = {
        .shutdown =     skge_shutdown,
 };
 
-static struct dmi_system_id skge_32bit_dma_boards[] = {
-       {
-               .ident = "Gigabyte nForce boards",
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
-                       DMI_MATCH(DMI_BOARD_NAME, "nForce"),
-               },
-       },
-       {}
-};
-
 static int __init skge_init_module(void)
 {
-       if (dmi_check_system(skge_32bit_dma_boards))
-               only_32bit_dma = 1;
        skge_debug_init();
        return pci_register_driver(&skge_driver);
 }
index a17aaeed096c86233471b3f52d8a3b2df8df4499..f3600b3eb8c5e68edc9be162ee7b28d66abeda36 100644 (file)
@@ -704,24 +704,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
        sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 }
 
-/* Enable Rx/Tx */
-static void sky2_enable_rx_tx(struct sky2_port *sky2)
-{
-       struct sky2_hw *hw = sky2->hw;
-       unsigned port = sky2->port;
-       u16 reg;
-
-       reg = gma_read16(hw, port, GM_GP_CTRL);
-       reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
-       gma_write16(hw, port, GM_GP_CTRL, reg);
-}
-
 /* Force a renegotiation */
 static void sky2_phy_reinit(struct sky2_port *sky2)
 {
        spin_lock_bh(&sky2->phy_lock);
        sky2_phy_init(sky2->hw, sky2->port);
-       sky2_enable_rx_tx(sky2);
        spin_unlock_bh(&sky2->phy_lock);
 }
 
@@ -1021,8 +1008,11 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
 static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
 {
        struct sky2_tx_le *le = sky2->tx_le + *slot;
+       struct tx_ring_info *re = sky2->tx_ring + *slot;
 
        *slot = RING_NEXT(*slot, sky2->tx_ring_size);
+       re->flags = 0;
+       re->skb = NULL;
        le->ctrl = 0;
        return le;
 }
@@ -1590,7 +1580,8 @@ static unsigned tx_le_req(const struct sk_buff *skb)
        return count;
 }
 
-static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
+static void sky2_tx_unmap(struct pci_dev *pdev,
+                         const struct tx_ring_info *re)
 {
        if (re->flags & TX_MAP_SINGLE)
                pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1600,7 +1591,6 @@ static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
                pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
                               pci_unmap_len(re, maplen),
                               PCI_DMA_TODEVICE);
-       re->flags = 0;
 }
 
 /*
@@ -1807,7 +1797,6 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
 
-                       re->skb = NULL;
                        dev_kfree_skb_any(skb);
 
                        sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
@@ -1942,6 +1931,7 @@ static void sky2_link_up(struct sky2_port *sky2)
 {
        struct sky2_hw *hw = sky2->hw;
        unsigned port = sky2->port;
+       u16 reg;
        static const char *fc_name[] = {
                [FC_NONE]       = "none",
                [FC_TX]         = "tx",
@@ -1949,7 +1939,10 @@ static void sky2_link_up(struct sky2_port *sky2)
                [FC_BOTH]       = "both",
        };
 
-       sky2_enable_rx_tx(sky2);
+       /* enable Rx/Tx */
+       reg = gma_read16(hw, port, GM_GP_CTRL);
+       reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+       gma_write16(hw, port, GM_GP_CTRL, reg);
 
        gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
 
index b496fa6811e897554cab3c3fb43c8d52358f5be3..f9cdcbcb77d4989efe930f1014a9c32128081216 100644 (file)
@@ -85,7 +85,8 @@ struct smsc911x_data {
         */
        spinlock_t mac_lock;
 
-       /* spinlock to ensure register accesses are serialised */
+       /* spinlock to ensure 16-bit accesses are serialised.
+        * unused with a 32-bit bus */
        spinlock_t dev_lock;
 
        struct phy_device *phy_dev;
@@ -118,33 +119,37 @@ struct smsc911x_data {
        unsigned int hashlo;
 };
 
-static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
+/* The 16-bit access functions are significantly slower, due to the locking
+ * necessary.  If your bus hardware can be configured to do this for you
+ * (in response to a single 32-bit operation from software), you should use
+ * the 32-bit access functions instead. */
+
+static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
 {
        if (pdata->config.flags & SMSC911X_USE_32BIT)
                return readl(pdata->ioaddr + reg);
 
-       if (pdata->config.flags & SMSC911X_USE_16BIT)
-               return ((readw(pdata->ioaddr + reg) & 0xFFFF) |
+       if (pdata->config.flags & SMSC911X_USE_16BIT) {
+               u32 data;
+               unsigned long flags;
+
+               /* these two 16-bit reads must be performed consecutively, so
+                * must not be interrupted by our own ISR (which would start
+                * another read operation) */
+               spin_lock_irqsave(&pdata->dev_lock, flags);
+               data = ((readw(pdata->ioaddr + reg) & 0xFFFF) |
                        ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16));
+               spin_unlock_irqrestore(&pdata->dev_lock, flags);
+
+               return data;
+       }
 
        BUG();
        return 0;
 }
 
-static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
-{
-       u32 data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&pdata->dev_lock, flags);
-       data = __smsc911x_reg_read(pdata, reg);
-       spin_unlock_irqrestore(&pdata->dev_lock, flags);
-
-       return data;
-}
-
-static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
-                                       u32 val)
+static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
+                                     u32 val)
 {
        if (pdata->config.flags & SMSC911X_USE_32BIT) {
                writel(val, pdata->ioaddr + reg);
@@ -152,54 +157,44 @@ static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
        }
 
        if (pdata->config.flags & SMSC911X_USE_16BIT) {
+               unsigned long flags;
+
+               /* these two 16-bit writes must be performed consecutively, so
+                * must not be interrupted by our own ISR (which would start
+                * another read operation) */
+               spin_lock_irqsave(&pdata->dev_lock, flags);
                writew(val & 0xFFFF, pdata->ioaddr + reg);
                writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2);
+               spin_unlock_irqrestore(&pdata->dev_lock, flags);
                return;
        }
 
        BUG();
 }
 
-static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
-                                     u32 val)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&pdata->dev_lock, flags);
-       __smsc911x_reg_write(pdata, reg, val);
-       spin_unlock_irqrestore(&pdata->dev_lock, flags);
-}
-
 /* Writes a packet to the TX_DATA_FIFO */
 static inline void
 smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
                      unsigned int wordcount)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&pdata->dev_lock, flags);
-
        if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
                while (wordcount--)
-                       __smsc911x_reg_write(pdata, TX_DATA_FIFO,
-                                            swab32(*buf++));
-               goto out;
+                       smsc911x_reg_write(pdata, TX_DATA_FIFO, swab32(*buf++));
+               return;
        }
 
        if (pdata->config.flags & SMSC911X_USE_32BIT) {
                writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
-               goto out;
+               return;
        }
 
        if (pdata->config.flags & SMSC911X_USE_16BIT) {
                while (wordcount--)
-                       __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++);
-               goto out;
+                       smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++);
+               return;
        }
 
        BUG();
-out:
-       spin_unlock_irqrestore(&pdata->dev_lock, flags);
 }
 
 /* Reads a packet out of the RX_DATA_FIFO */
@@ -207,31 +202,24 @@ static inline void
 smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
                     unsigned int wordcount)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&pdata->dev_lock, flags);
-
        if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
                while (wordcount--)
-                       *buf++ = swab32(__smsc911x_reg_read(pdata,
-                                                           RX_DATA_FIFO));
-               goto out;
+                       *buf++ = swab32(smsc911x_reg_read(pdata, RX_DATA_FIFO));
+               return;
        }
 
        if (pdata->config.flags & SMSC911X_USE_32BIT) {
                readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
-               goto out;
+               return;
        }
 
        if (pdata->config.flags & SMSC911X_USE_16BIT) {
                while (wordcount--)
-                       *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO);
-               goto out;
+                       *buf++ = smsc911x_reg_read(pdata, RX_DATA_FIFO);
+               return;
        }
 
        BUG();
-out:
-       spin_unlock_irqrestore(&pdata->dev_lock, flags);
 }
 
 /* waits for MAC not busy, with timeout.  Only called by smsc911x_mac_read
index fd6622ca4cd5918ae5d5a596efe3f166dd16b429..ba5d3fe753b694d58b93bedf2fc050f001995fef 100644 (file)
@@ -4995,7 +4995,7 @@ static void tg3_poll_controller(struct net_device *dev)
        struct tg3 *tp = netdev_priv(dev);
 
        for (i = 0; i < tp->irq_cnt; i++)
-               tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
+               tg3_interrupt(tp->napi[i].irq_vec, dev);
 }
 #endif
 
@@ -5392,7 +5392,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
-               u32 tcp_opt_len, ip_tcp_len, hdr_len;
+               int tcp_opt_len, ip_tcp_len, hdr_len;
 
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5423,10 +5423,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                                                                 IPPROTO_TCP,
                                                                 0);
 
-               if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
-                       mss |= hdr_len << 9;
-               else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
-                       GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+               if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
+                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;
 
@@ -5461,9 +5459,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
        would_hit_hwbug = 0;
 
-       if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
-               would_hit_hwbug = 1;
-
        if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
                would_hit_hwbug = 1;
        else if (tg3_4g_overflow_test(mapping, len))
@@ -5487,10 +5482,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
                        tnapi->tx_buffers[entry].skb = NULL;
 
-                       if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
-                               len <= 8)
-                                       would_hit_hwbug = 1;
-
                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;
 
@@ -8168,7 +8159,6 @@ static int tg3_test_msi(struct tg3 *tp)
        pci_disable_msi(tp->pdev);
 
        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
-       tp->napi[0].irq_vec = tp->pdev->irq;
 
        err = tg3_request_irq(tp, 0);
        if (err)
@@ -12618,9 +12608,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                }
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
-               tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
-
        tp->irq_max = 1;
 
 #ifdef TG3_NAPI
@@ -13988,7 +13975,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                goto err_out_iounmap;
        }
 
-       if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+       if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                dev->netdev_ops = &tg3_netdev_ops;
        else
                dev->netdev_ops = &tg3_netdev_ops_dma_bug;
index 529f55ad16dbb8de49d1559f80f9b9ea23eb843e..bab7940158e65d208ca734f22d12e432620f69bb 100644 (file)
@@ -2759,9 +2759,6 @@ struct tg3 {
 #define TG3_FLG3_TOGGLE_10_100_L1PLLPD 0x00008000
 #define TG3_FLG3_PHY_IS_FET            0x00010000
 #define TG3_FLG3_ENABLE_RSS            0x00020000
-#define TG3_FLG3_4G_DMA_BNDRY_BUG      0x00080000
-#define TG3_FLG3_40BIT_DMA_LIMIT_BUG   0x00100000
-#define TG3_FLG3_SHORT_DMA_BUG  0x00200000
 
        struct timer_list               timer;
        u16                             timer_counter;
index 516713fa0a0570a4d3faf871fa4cf1089297981b..1cc8cf4425d18a576e5c8829a1bbfb0461bb0f18 100644 (file)
@@ -101,10 +101,6 @@ config TULIP_NAPI_HW_MITIGATION
 
          If in doubt, say Y.
 
-config TULIP_DM910X
-       def_bool y
-       depends on TULIP && SPARC
-
 config DE4X5
        tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
        depends on PCI || EISA
index b94370f7eb5afd8e32e01b446cfa5ee54e7d52d1..a45ded0538b84db192a1d2ff6bbc3ce1eb1ad0f3 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/irq.h>
 
-#ifdef CONFIG_TULIP_DM910X
-#include <linux/of.h>
-#endif
-
 
 /* Board/System/Debug information/definition ---------------- */
 #define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
@@ -381,23 +377,6 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
        if (!printed_version++)
                printk(version);
 
-       /*
-        *      SPARC on-board DM910x chips should be handled by the main
-        *      tulip driver, except for early DM9100s.
-        */
-#ifdef CONFIG_TULIP_DM910X
-       if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
-           ent->driver_data == PCI_DM9102_ID) {
-               struct device_node *dp = pci_device_to_OF_node(pdev);
-
-               if (dp && of_get_property(dp, "local-mac-address", NULL)) {
-                       printk(KERN_INFO DRV_NAME
-                              ": skipping on-board DM910x (use tulip)\n");
-                       return -ENODEV;
-               }
-       }
-#endif
-
        /* Init network device */
        dev = alloc_etherdev(sizeof(*db));
        if (dev == NULL)
index 88bf54f8562c5d2de4be39f2c681c419394cce83..6b2330e4206e12467c7e28c08e155a4c5bccf668 100644 (file)
@@ -196,13 +196,9 @@ struct tulip_chip_table tulip_tbl[] = {
        | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
 
   /* DM910X */
-#ifdef CONFIG_TULIP_DM910X
   { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
        HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
        tulip_timer, tulip_media_task },
-#else
-  { NULL },
-#endif
 
   /* RS7112 */
   { "Conexant LANfinity", 256, 0x0001ebef,
@@ -232,10 +228,8 @@ static struct pci_device_id tulip_pci_tbl[] = {
        { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
        { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
        { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
-#ifdef CONFIG_TULIP_DM910X
        { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
        { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
-#endif
        { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
        { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
        { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
@@ -1305,30 +1299,18 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
        }
 
        /*
-        *      DM910x chips should be handled by the dmfe driver, except
-        *      on-board chips on SPARC systems.  Also, early DM9100s need
-        *      software CRC which only the dmfe driver supports.
+        *      Early DM9100's need software CRC and the DMFE driver
         */
 
-#ifdef CONFIG_TULIP_DM910X
-       if (chip_idx == DM910X) {
-               struct device_node *dp;
-
-               if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
-                   pdev->revision < 0x30) {
-                       printk(KERN_INFO PFX
-                              "skipping early DM9100 with Crc bug (use dmfe)\n");
-                       return -ENODEV;
-               }
-
-               dp = pci_device_to_OF_node(pdev);
-               if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
-                       printk(KERN_INFO PFX
-                              "skipping DM910x expansion card (use dmfe)\n");
+       if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
+       {
+               /* Read Chip revision */
+               if (pdev->revision < 0x30)
+               {
+                       printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
                        return -ENODEV;
                }
        }
-#endif
 
        /*
         *      Looks for early PCI chipsets where people report hangs
index 0f77aca7280a2f1a7d780a910a9758853a6cb9ce..4fdfa2ae5418e6c030be80b5568e3c12dcbbf3f9 100644 (file)
@@ -1006,8 +1006,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                if (err < 0)
                        goto err_free_sk;
 
-               if (!net_eq(dev_net(tun->dev), &init_net) ||
-                   device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
+               if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
                    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
                    device_create_file(&tun->dev->dev, &dev_attr_group))
                        printk(KERN_ERR "Failed to create tun sysfs files\n");
index b4b25ffa3ab7c6a199852604997d00869eda1f1d..4469f2451a6f660a20fc37d0ca9e47ec4114b787 100644 (file)
@@ -1563,10 +1563,7 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
 
 static void ugeth_quiesce(struct ucc_geth_private *ugeth)
 {
-       /* Prevent any further xmits, plus detach the device. */
-       netif_device_detach(ugeth->ndev);
-
-       /* Wait for any current xmits to finish. */
+       /* Wait for and prevent any further xmits. */
        netif_tx_disable(ugeth->ndev);
 
        /* Disable the interrupt to avoid NAPI rescheduling. */
@@ -1580,7 +1577,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
 {
        napi_enable(&ugeth->napi);
        enable_irq(ugeth->ug_info->uf_info.irq);
-       netif_device_attach(ugeth->ndev);
+       netif_tx_wake_all_queues(ugeth->ndev);
 }
 
 /* Called every time the controller might need to be made
@@ -3276,12 +3273,13 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
                /* Handle the transmitted buffer and release */
                /* the BD to be used with the current frame  */
 
-               skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
-               if (!skb)
+               if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
                        break;
 
                dev->stats.tx_packets++;
 
+               skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+
                if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
                             skb_recycle_check(skb,
                                    ugeth->ug_info->uf_info.max_rx_buf_length +
index e644f9afa32a494e1e68dcf5bcbdf5a8ecc5440c..6ce7f775bb7482a362d4dc1320570847bcfe7511 100644 (file)
@@ -54,7 +54,6 @@ static const char driver_name [] = "asix";
 #define AX_CMD_WRITE_IPG0              0x12
 #define AX_CMD_WRITE_IPG1              0x13
 #define AX_CMD_READ_NODE_ID            0x13
-#define AX_CMD_WRITE_NODE_ID           0x14
 #define AX_CMD_WRITE_IPG2              0x14
 #define AX_CMD_WRITE_MULTI_FILTER      0x16
 #define AX88172_CMD_READ_NODE_ID       0x17
@@ -166,7 +165,6 @@ static const char driver_name [] = "asix";
 /* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
 struct asix_data {
        u8 multi_filter[AX_MCAST_FILTER_SIZE];
-       u8 mac_addr[ETH_ALEN];
        u8 phymode;
        u8 ledmode;
        u8 eeprom_len;
@@ -730,30 +728,6 @@ static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
        return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
 }
 
-static int asix_set_mac_address(struct net_device *net, void *p)
-{
-       struct usbnet *dev = netdev_priv(net);
-       struct asix_data *data = (struct asix_data *)&dev->data;
-       struct sockaddr *addr = p;
-
-       if (netif_running(net))
-               return -EBUSY;
-       if (!is_valid_ether_addr(addr->sa_data))
-               return -EADDRNOTAVAIL;
-
-       memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
-
-       /* We use the 20 byte dev->data
-        * for our 6 byte mac buffer
-        * to avoid allocating memory that
-        * is tricky to free later */
-       memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
-       asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
-                                                       data->mac_addr);
-
-       return 0;
-}
-
 /* We need to override some ethtool_ops so we require our
    own structure so we don't interfere with other usbnet
    devices that may be connected at the same time. */
@@ -941,7 +915,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
        .ndo_change_mtu         = usbnet_change_mtu,
-       .ndo_set_mac_address    = asix_set_mac_address,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = asix_ioctl,
        .ndo_set_multicast_list = asix_set_multicast,
@@ -1234,7 +1208,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
        .ndo_stop               = usbnet_stop,
        .ndo_start_xmit         = usbnet_start_xmit,
        .ndo_tx_timeout         = usbnet_tx_timeout,
-       .ndo_set_mac_address    = asix_set_mac_address,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = asix_set_multicast,
        .ndo_do_ioctl           = asix_ioctl,
index 9a6eedef4afc9f8d8fc312ed4222f35cb33dc06b..a2b30a10064f131e996657fb5f03dd600c528be8 100644 (file)
@@ -238,7 +238,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
                goto out;
 
        dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
-       dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
+       dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14);
 
        for (i = 0; i < DM_TIMEOUT; i++) {
                u8 tmp;
index f450bc9a89ac23851a3889594e3bebb7b67a43c7..43bc3fcc0d8523e837a07dcd40f3b3ca738fe218 100644 (file)
@@ -1634,8 +1634,6 @@ static int hso_get_count(struct hso_serial *serial,
        struct uart_icount cnow;
        struct hso_tiocmget  *tiocmget = serial->tiocmget;
 
-       memset(&icount, 0, sizeof(struct serial_icounter_struct));
-
        if (!tiocmget)
                 return -ENOENT;
        spin_lock_irq(&serial->serial_lock);
index 31a5d3c15ae96f290bc9c81611d876c38f512abf..1fd70583be444089b3c6f402477f204bb3128335 100644 (file)
@@ -102,7 +102,6 @@ static const int multicast_filter_limit = 32;
 #include <linux/ethtool.h>
 #include <linux/crc32.h>
 #include <linux/bitops.h>
-#include <linux/workqueue.h>
 #include <asm/processor.h>     /* Processor type for cache alignment. */
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -390,7 +389,6 @@ struct rhine_private {
        struct net_device *dev;
        struct napi_struct napi;
        spinlock_t lock;
-       struct work_struct reset_task;
 
        /* Frequently used values: keep some adjacent for cache effect. */
        u32 quirks;
@@ -409,7 +407,6 @@ struct rhine_private {
 static int  mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
-static void rhine_reset_task(struct work_struct *work);
 static void rhine_tx_timeout(struct net_device *dev);
 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                  struct net_device *dev);
@@ -778,8 +775,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
        dev->irq = pdev->irq;
 
        spin_lock_init(&rp->lock);
-       INIT_WORK(&rp->reset_task, rhine_reset_task);
-
        rp->mii_if.dev = dev;
        rp->mii_if.mdio_read = mdio_read;
        rp->mii_if.mdio_write = mdio_write;
@@ -1184,18 +1179,22 @@ static int rhine_open(struct net_device *dev)
        return 0;
 }
 
-static void rhine_reset_task(struct work_struct *work)
+static void rhine_tx_timeout(struct net_device *dev)
 {
-       struct rhine_private *rp = container_of(work, struct rhine_private,
-                                               reset_task);
-       struct net_device *dev = rp->dev;
+       struct rhine_private *rp = netdev_priv(dev);
+       void __iomem *ioaddr = rp->base;
+
+       printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+              "%4.4x, resetting...\n",
+              dev->name, ioread16(ioaddr + IntrStatus),
+              mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
 
        /* protect against concurrent rx interrupts */
        disable_irq(rp->pdev->irq);
 
        napi_disable(&rp->napi);
 
-       spin_lock_bh(&rp->lock);
+       spin_lock(&rp->lock);
 
        /* clear all descriptors */
        free_tbufs(dev);
@@ -1207,7 +1206,7 @@ static void rhine_reset_task(struct work_struct *work)
        rhine_chip_reset(dev);
        init_registers(dev);
 
-       spin_unlock_bh(&rp->lock);
+       spin_unlock(&rp->lock);
        enable_irq(rp->pdev->irq);
 
        dev->trans_start = jiffies;
@@ -1215,19 +1214,6 @@ static void rhine_reset_task(struct work_struct *work)
        netif_wake_queue(dev);
 }
 
-static void rhine_tx_timeout(struct net_device *dev)
-{
-       struct rhine_private *rp = netdev_priv(dev);
-       void __iomem *ioaddr = rp->base;
-
-       printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
-              "%4.4x, resetting...\n",
-              dev->name, ioread16(ioaddr + IntrStatus),
-              mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
-
-       schedule_work(&rp->reset_task);
-}
-
 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                  struct net_device *dev)
 {
@@ -1844,12 +1830,11 @@ static int rhine_close(struct net_device *dev)
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
 
-       napi_disable(&rp->napi);
-       cancel_work_sync(&rp->reset_task);
-       netif_stop_queue(dev);
-
        spin_lock_irq(&rp->lock);
 
+       netif_stop_queue(dev);
+       napi_disable(&rp->napi);
+
        if (debug > 1)
                printk(KERN_DEBUG "%s: Shutting down ethercard, "
                       "status was %4.4x.\n",
index 74b9d7d4a3efae426b56d02ae70f5eec8d5d993c..e04e5bee005ca36eaff7ebe8c9ccd4ae8c34e3ef 100644 (file)
@@ -2186,6 +2186,8 @@ static int velocity_open(struct net_device *dev)
        /* Ensure chip is running */
        pci_set_power_state(vptr->pdev, PCI_D0);
 
+       velocity_give_many_rx_descs(vptr);
+
        velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
        ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
@@ -2197,8 +2199,6 @@ static int velocity_open(struct net_device *dev)
                goto out;
        }
 
-       velocity_give_many_rx_descs(vptr);
-
        mac_enable_int(vptr->mac_regs);
        netif_start_queue(dev);
        vptr->flags |= VELOCITY_FLAGS_OPENED;
@@ -2287,10 +2287,10 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 
                dev->mtu = new_mtu;
 
-               velocity_init_registers(vptr, VELOCITY_INIT_COLD);
-
                velocity_give_many_rx_descs(vptr);
 
+               velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+
                mac_enable_int(vptr->mac_regs);
                netif_start_queue(dev);
 
index 7e3788d543109d11f0bf711ffff203498655aebf..b9e002fccbca22e20541136a3779c9f56bc2220c 100644 (file)
@@ -398,7 +398,8 @@ static void refill_work(struct work_struct *work)
 
        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
-       still_empty = !try_fill_recv(vi, GFP_KERNEL);
+       try_fill_recv(vi, GFP_KERNEL);
+       still_empty = (vi->num == 0);
        napi_enable(&vi->napi);
 
        /* In theory, this can happen: if we don't get any buffers in
index 1a11d955f2151a92547344920c25886cafc7ff1f..baa051d5bfbe0bcd84bd9f28530ac73f6d4863d8 100644 (file)
@@ -1619,7 +1619,6 @@ static void backend_changed(struct xenbus_device *dev,
                if (xennet_connect(netdev) != 0)
                        break;
                xenbus_switch_state(dev, XenbusStateConnected);
-               netif_notify_peers(netdev);
                break;
 
        case XenbusStateClosing:
index 5c4df24eae4bb0066fa34a616d9324996f4d5c07..c9e2ae90f19508db8f74f91440636689d50321d8 100644 (file)
@@ -140,6 +140,16 @@ static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
 };
 
+
+static void end_sync(void)
+{
+       end_cpu_work();
+       /* make sure we don't leak task structs */
+       process_task_mortuary();
+       process_task_mortuary();
+}
+
+
 int sync_start(void)
 {
        int err;
@@ -147,7 +157,7 @@ int sync_start(void)
        if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
 
-       mutex_lock(&buffer_mutex);
+       start_cpu_work();
 
        err = task_handoff_register(&task_free_nb);
        if (err)
@@ -162,10 +172,7 @@ int sync_start(void)
        if (err)
                goto out4;
 
-       start_cpu_work();
-
 out:
-       mutex_unlock(&buffer_mutex);
        return err;
 out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -174,6 +181,7 @@ out3:
 out2:
        task_handoff_unregister(&task_free_nb);
 out1:
+       end_sync();
        free_cpumask_var(marked_cpus);
        goto out;
 }
@@ -181,20 +189,11 @@ out1:
 
 void sync_stop(void)
 {
-       /* flush buffers */
-       mutex_lock(&buffer_mutex);
-       end_cpu_work();
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
-       mutex_unlock(&buffer_mutex);
-       flush_scheduled_work();
-
-       /* make sure we don't leak task structs */
-       process_task_mortuary();
-       process_task_mortuary();
-
+       end_sync();
        free_cpumask_var(marked_cpus);
 }
 
index 5e2ac4aea949c381f492cf264f827dfe4fc59aae..a7aae24f2889a4578ebd90dad52ae72aab916116 100644 (file)
 
 #define OP_BUFFER_FLAGS        0
 
-static struct ring_buffer *op_ring_buffer;
+/*
+ * Read and write access is using spin locking. Thus, writing to the
+ * buffer by NMI handler (x86) could occur also during critical
+ * sections when reading the buffer. To avoid this, there are 2
+ * buffers for independent read and write access. Read access is in
+ * process context only, write access only in the NMI handler. If the
+ * read buffer runs empty, both buffers are swapped atomically. There
+ * is potentially a small window during swapping where the buffers are
+ * disabled and samples could be lost.
+ *
+ * Using 2 buffers is a little bit overhead, but the solution is clear
+ * and does not require changes in the ring buffer implementation. It
+ * can be changed to a single buffer solution when the ring buffer
+ * access is implemented as non-locking atomic code.
+ */
+static struct ring_buffer *op_ring_buffer_read;
+static struct ring_buffer *op_ring_buffer_write;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
@@ -53,9 +69,12 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
 
 void free_cpu_buffers(void)
 {
-       if (op_ring_buffer)
-               ring_buffer_free(op_ring_buffer);
-       op_ring_buffer = NULL;
+       if (op_ring_buffer_read)
+               ring_buffer_free(op_ring_buffer_read);
+       op_ring_buffer_read = NULL;
+       if (op_ring_buffer_write)
+               ring_buffer_free(op_ring_buffer_write);
+       op_ring_buffer_write = NULL;
 }
 
 #define RB_EVENT_HDR_SIZE 4
@@ -68,8 +87,11 @@ int alloc_cpu_buffers(void)
        unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
                                                 RB_EVENT_HDR_SIZE);
 
-       op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
-       if (!op_ring_buffer)
+       op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+       if (!op_ring_buffer_read)
+               goto fail;
+       op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+       if (!op_ring_buffer_write)
                goto fail;
 
        for_each_possible_cpu(i) {
@@ -121,6 +143,8 @@ void end_cpu_work(void)
 
                cancel_delayed_work(&b->work);
        }
+
+       flush_scheduled_work();
 }
 
 /*
@@ -139,11 +163,16 @@ struct op_sample
 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
 {
        entry->event = ring_buffer_lock_reserve
-               (op_ring_buffer, sizeof(struct op_sample) +
+               (op_ring_buffer_write, sizeof(struct op_sample) +
                 size * sizeof(entry->sample->data[0]));
-       if (!entry->event)
+       if (entry->event)
+               entry->sample = ring_buffer_event_data(entry->event);
+       else
+               entry->sample = NULL;
+
+       if (!entry->sample)
                return NULL;
-       entry->sample = ring_buffer_event_data(entry->event);
+
        entry->size = size;
        entry->data = entry->sample->data;
 
@@ -152,16 +181,25 @@ struct op_sample
 
 int op_cpu_buffer_write_commit(struct op_entry *entry)
 {
-       return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
+       return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
 }
 
 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
 {
        struct ring_buffer_event *e;
-       e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
-       if (!e)
+       e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+       if (e)
+               goto event;
+       if (ring_buffer_swap_cpu(op_ring_buffer_read,
+                                op_ring_buffer_write,
+                                cpu))
                return NULL;
+       e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+       if (e)
+               goto event;
+       return NULL;
 
+event:
        entry->event = e;
        entry->sample = ring_buffer_event_data(e);
        entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -172,7 +210,8 @@ struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
 
 unsigned long op_cpu_buffer_entries(int cpu)
 {
-       return ring_buffer_entries_cpu(op_ring_buffer, cpu);
+       return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
+               + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
 }
 
 static int
index cc31baa31659782183de1980513265f3882edb31..9581d3619450d609ca69e305d4fcad7148f9ab93 100644 (file)
@@ -182,18 +182,16 @@ static int led_proc_read(char *page, char **start, off_t off, int count,
 static int led_proc_write(struct file *file, const char *buf, 
        unsigned long count, void *data)
 {
-       char *cur, lbuf[32];
+       char *cur, lbuf[count + 1];
        int d;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
 
-       if (count >= sizeof(lbuf))
-               count = sizeof(lbuf)-1;
+       memset(lbuf, 0, count + 1);
 
        if (copy_from_user(lbuf, buf, count))
                return -EFAULT;
-       lbuf[count] = 0;
 
        cur = lbuf;
 
index 5becbdee4027019a5c584df5ff5fb3cc99b04cc3..c1abac8ab5c326847ca8cca0c2eb33b1fcae4e1e 100644 (file)
@@ -245,7 +245,7 @@ static void __init print_ebda_hpc (void)
 
 int __init ibmphp_access_ebda (void)
 {
-       u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz;
+       u8 format, num_ctlrs, rio_complete, hs_complete;
        u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base;
        int rc = 0;
 
@@ -260,16 +260,7 @@ int __init ibmphp_access_ebda (void)
        iounmap (io_mem);
        debug ("returned ebda segment: %x\n", ebda_seg);
        
-       io_mem = ioremap(ebda_seg<<4, 1);
-       if (!io_mem)
-               return -ENOMEM;
-       ebda_sz = readb(io_mem);
-       iounmap(io_mem);
-       debug("ebda size: %d(KiB)\n", ebda_sz);
-       if (ebda_sz == 0)
-               return -ENOMEM;
-
-       io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024));
+       io_mem = ioremap(ebda_seg<<4, 1024);
        if (!io_mem )
                return -ENOMEM;
        next_offset = 0x180;
index ba83495fb5a6e458f75100c34d54adc13260686a..2498602151e6cb081189d81ac62eb2850c52abfb 100644 (file)
 #define DMA_32BIT_PFN          IOVA_PFN(DMA_BIT_MASK(32))
 #define DMA_64BIT_PFN          IOVA_PFN(DMA_BIT_MASK(64))
 
-/* page table handling */
-#define LEVEL_STRIDE           (9)
-#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
-       return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
-       return 30 + agaw * LEVEL_STRIDE;
-}
-
-static inline int width_to_agaw(int width)
-{
-       return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
-       return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
-       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
-       return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
-       return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
-       return (pfn + level_size(level) - 1) & level_mask(level);
-}
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
    are never going to work. */
@@ -492,6 +449,8 @@ void free_iova_mem(struct iova *iova)
 }
 
 
+static inline int width_to_agaw(int width);
+
 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
 {
        unsigned long sagaw;
@@ -705,6 +664,51 @@ out:
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
+/* page table handling */
+#define LEVEL_STRIDE           (9)
+#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+       return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+       return 30 + agaw * LEVEL_STRIDE;
+
+}
+
+static inline int width_to_agaw(int width)
+{
+       return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+       return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+       return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+       return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+       return (pfn + level_size(level) - 1) & level_mask(level);
+}
+
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn)
 {
index 0fb1d0542339281533895b53bbf3aba8988c64af..f9cf3173b23dcc9e8bea22d1ff6679523bcb0944 100644 (file)
@@ -195,9 +195,6 @@ void unmask_msi_irq(unsigned int irq)
 void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 {
        struct msi_desc *entry = get_irq_desc_msi(desc);
-
-       BUG_ON(entry->dev->current_state != PCI_D0);
-
        if (entry->msi_attrib.is_msix) {
                void __iomem *base = entry->mask_base +
                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
@@ -231,32 +228,10 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
        read_msi_msg_desc(desc, msg);
 }
 
-void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
-{
-       struct msi_desc *entry = get_irq_desc_msi(desc);
-
-       /* Assert that the cache is valid, assuming that
-        * valid messages are not all-zeroes. */
-       BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
-                entry->msg.data));
-
-       *msg = entry->msg;
-}
-
-void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       get_cached_msi_msg_desc(desc, msg);
-}
-
 void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 {
        struct msi_desc *entry = get_irq_desc_msi(desc);
-
-       if (entry->dev->current_state != PCI_D0) {
-               /* Don't touch the hardware now */
-       } else if (entry->msi_attrib.is_msix) {
+       if (entry->msi_attrib.is_msix) {
                void __iomem *base;
                base = entry->mask_base +
                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
index 3a3b9110db3ea564d9587aba7a2a3fdd873c78c6..0f6382f090ee575f04add353066be31abbb05336 100644 (file)
@@ -662,21 +662,17 @@ void pci_remove_legacy_files(struct pci_bus *b)
 
 #ifdef HAVE_PCI_MMAP
 
-int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
-                 enum pci_mmap_api mmap_api)
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
 {
-       unsigned long nr, start, size, pci_start;
+       unsigned long nr, start, size;
 
-       if (pci_resource_len(pdev, resno) == 0)
-               return 0;
        nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        start = vma->vm_pgoff;
        size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
-       pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
-                       pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
-       if (start >= pci_start && start < pci_start + size &&
-                       start + nr <= pci_start + size)
+       if (start < size && size - start >= nr)
                return 1;
+       WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
+               current->comm, start, start+nr, pci_name(pdev), resno, size);
        return 0;
 }
 
@@ -706,14 +702,8 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
        if (i >= PCI_ROM_RESOURCE)
                return -ENODEV;
 
-       if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
-               WARN(1, "process \"%s\" tried to map 0x%08lx bytes "
-                       "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
-                       current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
-                       pci_name(pdev), i,
-                       pci_resource_start(pdev, i), pci_resource_len(pdev, i));
+       if (!pci_mmap_fits(pdev, i, vma))
                return -EINVAL;
-       }
 
        /* pci_mmap_page_range() expects the same kind of entry as coming
         * from /proc/bus/pci/ which is a "user visible" value. If this is
index 812d4ac6bd2fbe4ed04e28f010ebaea8508a7c65..64777220a719877c0d9aec17a133a050948cdf26 100644 (file)
@@ -601,7 +601,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
  */
 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 {
-       return state >= PCI_D0 ?
+       return state > PCI_D0 ?
                        pci_platform_power_transition(dev, state) : -EINVAL;
 }
 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
@@ -638,6 +638,10 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
                 */
                return 0;
 
+       /* Check if we're already there */
+       if (dev->current_state == state)
+               return 0;
+
        __pci_start_power_transition(dev, state);
 
        /* This device is quirked not to be put into D3, so
@@ -2046,7 +2050,6 @@ void pci_msi_off(struct pci_dev *dev)
                pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
        }
 }
-EXPORT_SYMBOL_GPL(pci_msi_off);
 
 #ifndef HAVE_ARCH_PCI_SET_DMA_MASK
 /*
@@ -2347,17 +2350,18 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
  */
 int pcix_get_max_mmrbc(struct pci_dev *dev)
 {
-       int cap;
+       int err, cap;
        u32 stat;
 
        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!cap)
                return -EINVAL;
 
-       if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
+       err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
+       if (err)
                return -EINVAL;
 
-       return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
+       return (stat & PCI_X_STATUS_MAX_READ) >> 12;
 }
 EXPORT_SYMBOL(pcix_get_max_mmrbc);
 
@@ -2370,17 +2374,18 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
  */
 int pcix_get_mmrbc(struct pci_dev *dev)
 {
-       int cap;
-       u16 cmd;
+       int ret, cap;
+       u32 cmd;
 
        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!cap)
                return -EINVAL;
 
-       if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
-               return -EINVAL;
+       ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
+       if (!ret)
+               ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
 
-       return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
+       return ret;
 }
 EXPORT_SYMBOL(pcix_get_mmrbc);
 
@@ -2395,27 +2400,28 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
  */
 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
 {
-       int cap;
-       u32 stat, v, o;
-       u16 cmd;
+       int cap, err = -EINVAL;
+       u32 stat, cmd, v, o;
 
        if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
-               return -EINVAL;
+               goto out;
 
        v = ffs(mmrbc) - 10;
 
        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!cap)
-               return -EINVAL;
+               goto out;
 
-       if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
-               return -EINVAL;
+       err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
+       if (err)
+               goto out;
 
        if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
                return -E2BIG;
 
-       if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
-               return -EINVAL;
+       err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
+       if (err)
+               goto out;
 
        o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
        if (o != v) {
@@ -2425,10 +2431,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
 
                cmd &= ~PCI_X_CMD_MAX_READ;
                cmd |= v << 2;
-               if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
-                       return -EIO;
+               err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
        }
-       return 0;
+out:
+       return err;
 }
 EXPORT_SYMBOL(pcix_set_mmrbc);
 
@@ -2538,23 +2544,6 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
        return 0;
 }
 
-/* Some architectures require additional programming to enable VGA */
-static arch_set_vga_state_t arch_set_vga_state;
-
-void __init pci_register_set_vga_state(arch_set_vga_state_t func)
-{
-       arch_set_vga_state = func;      /* NULL disables */
-}
-
-static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
-                     unsigned int command_bits, bool change_bridge)
-{
-       if (arch_set_vga_state)
-               return arch_set_vga_state(dev, decode, command_bits,
-                                               change_bridge);
-       return 0;
-}
-
 /**
  * pci_set_vga_state - set VGA decode state on device and parents if requested
  * @dev: the PCI device
@@ -2568,15 +2557,9 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
        struct pci_bus *bus;
        struct pci_dev *bridge;
        u16 cmd;
-       int rc;
 
        WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
 
-       /* ARCH specific VGA enables */
-       rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
-       if (rc)
-               return rc;
-
        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (decode == true)
                cmd |= command_bits;
@@ -2823,3 +2806,4 @@ EXPORT_SYMBOL(pci_target_state);
 EXPORT_SYMBOL(pci_prepare_to_sleep);
 EXPORT_SYMBOL(pci_back_from_sleep);
 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
+
index bfc3337adcd1b170b2c23dc5bea3e9316d0e4807..d92d1954a2fb17ea0415772531ee27563ec02da3 100644 (file)
@@ -13,13 +13,8 @@ extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
 extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
 extern void pci_cleanup_rom(struct pci_dev *dev);
 #ifdef HAVE_PCI_MMAP
-enum pci_mmap_api {
-       PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
-       PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
-};
 extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
-                        struct vm_area_struct *vmai,
-                        enum pci_mmap_api mmap_api);
+                        struct vm_area_struct *vma);
 #endif
 int pci_probe_reset_function(struct pci_dev *dev);
 
index b8fb987e7600e753e3e4a71ebecbb4508404d7dd..0d91a8a4d278a9d38b72c74a87c148b269f0dbea 100644 (file)
@@ -302,7 +302,7 @@ static int aer_inject(struct aer_error_inj *einj)
        unsigned long flags;
        unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
        int pos_cap_err, rp_pos_cap_err;
-       u32 sever, cor_mask, uncor_mask;
+       u32 sever;
        int ret = 0;
 
        dev = pci_get_bus_and_slot(einj->bus, devfn);
@@ -320,9 +320,6 @@ static int aer_inject(struct aer_error_inj *einj)
                goto out_put;
        }
        pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
-       pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
-       pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
-                             &uncor_mask);
 
        rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
        if (!rp_pos_cap_err) {
@@ -357,21 +354,6 @@ static int aer_inject(struct aer_error_inj *einj)
        err->header_log2 = einj->header_log2;
        err->header_log3 = einj->header_log3;
 
-       if (einj->cor_status && !(einj->cor_status & ~cor_mask)) {
-               ret = -EINVAL;
-               printk(KERN_WARNING "The correctable error(s) is masked "
-                               "by device\n");
-               spin_unlock_irqrestore(&inject_lock, flags);
-               goto out_put;
-       }
-       if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) {
-               ret = -EINVAL;
-               printk(KERN_WARNING "The uncorrectable error(s) is masked "
-                               "by device\n");
-               spin_unlock_irqrestore(&inject_lock, flags);
-               goto out_put;
-       }
-
        rperr = __find_aer_error_by_dev(rpdev);
        if (!rperr) {
                rperr = rperr_alloc;
index 72fa87c095d8c6752232e070c70dd96f74ed9111..9f5ccbeb4fa5fcef281eb2ed3d3a079ae885909e 100644 (file)
@@ -78,15 +78,19 @@ EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 {
        int pos;
-       u32 status;
+       u32 status, mask;
 
        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
        if (!pos)
                return -EIO;
 
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
-       if (status)
-               pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+       pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
+       if (dev->error_state == pci_channel_io_normal)
+               status &= ~mask; /* Clear corresponding nonfatal bits */
+       else
+               status &= mask; /* Clear corresponding fatal bits */
+       pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
 
        return 0;
 }
index a03ad8cfc6b99ed938b5b84cf970ec25de88ce78..593bb844b8db68258a41647f709f1d5d6807640d 100644 (file)
@@ -259,7 +259,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
 
        /* Make sure the caller is mapping a real resource for this device */
        for (i = 0; i < PCI_ROM_RESOURCE; i++) {
-               if (pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
+               if (pci_mmap_fits(dev, i, vma))
                        break;
        }
 
index 4633fc228603b5477f91b7a9a7979659fedf4280..245d2cdb47651d4d095edd847417b5251565fc69 100644 (file)
@@ -154,26 +154,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1,       quirk_isa_d
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,     PCI_DEVICE_ID_NEC_CBUS_2,       quirk_isa_dma_hangs);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,     PCI_DEVICE_ID_NEC_CBUS_3,       quirk_isa_dma_hangs);
 
-/*
- * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
- * for some HT machines to use C4 w/o hanging.
- */
-static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
-{
-       u32 pmbase;
-       u16 pm1a;
-
-       pci_read_config_dword(dev, 0x40, &pmbase);
-       pmbase = pmbase & 0xff80;
-       pm1a = inw(pmbase);
-
-       if (pm1a & 0x10) {
-               dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
-               outw(0x10, pmbase);
-       }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
-
 /*
  *     Chipsets where PCI->PCI transfers vanish or hang
  */
@@ -1464,8 +1444,7 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
        conf5 &= ~(1 << 24);  /* Clear bit 24 */
 
        switch (pdev->device) {
-       case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
-       case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
+       case PCI_DEVICE_ID_JMICRON_JMB360:
                /* The controller should be in single function ahci mode */
                conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
                break;
@@ -1501,14 +1480,12 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
@@ -2104,7 +2081,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disabl
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
 
 /* Disable MSI on chipsets that are known to not support it */
 static void __devinit quirk_disable_msi(struct pci_dev *dev)
@@ -2116,8 +2092,6 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev)
        }
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
 
 /* Go through the list of Hypertransport capabilities and
  * return 1 if a HT MSI capability is found and enabled */
@@ -2209,16 +2183,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
                         ht_enable_msi_mapping);
 
-/* The P5N32-SLI motherboards from Asus have a problem with msi
+/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
  * for the MCP55 NIC. It is not yet determined whether the msi problem
  * also affects other devices. As for now, turn off msi for this device.
  */
 static void __devinit nvenet_msi_disable(struct pci_dev *dev)
 {
-       if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
-           dmi_name_in_vendors("P5N32-E SLI")) {
+       if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
                dev_info(&dev->dev,
-                        "Disabling msi for MCP55 NIC on P5N32-SLI\n");
+                        "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
                dev->no_msi = 1;
        }
 }
@@ -2377,9 +2350,6 @@ static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
        int pos;
        int found;
 
-       if (!pci_msi_enabled())
-               return;
-
        /* check if there is HT MSI cap or enabled on this device */
        found = ht_check_msi_mapping(dev);
 
@@ -2543,7 +2513,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
 
 #endif /* CONFIG_PCI_IOV */
 
index 7905285f707ee7a4c0823d809e53d1a3260b2b56..d919e96c0afd62f5bde18da2ca0a974b63b66079 100644 (file)
@@ -39,7 +39,7 @@ module_param(io_speed, int, 0444);
 #ifdef CONFIG_PCMCIA_PROBE
 #include <asm/irq.h>
 /* mask of IRQs already reserved by other cards, we should avoid using them */
-static u8 pcmcia_used_irq[32];
+static u8 pcmcia_used_irq[NR_IRQS];
 #endif
 
 
@@ -719,9 +719,6 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
                for (try = 0; try < 64; try++) {
                        irq = try % 32;
 
-                       if (irq > NR_IRQS)
-                               continue;
-
                        /* marked as available by driver, and not blocked by userspace? */
                        if (!((mask >> irq) & 1))
                                continue;
index 6e2a4ca4162edc5ef7c3a856aa714e06dc249b20..55ca39dea42e7f4177a3cd9f79f709cfb7dd1a64 100644 (file)
@@ -291,15 +291,9 @@ config THINKPAD_ACPI_VIDEO
          server running, phase of the moon, and the current mood of
          Schroedinger's cat.  If you can use X.org's RandR to control
          your ThinkPad's video output ports instead of this feature,
-         don't think twice: do it and say N here to save memory and avoid
-         bad interactions with X.org.
+         don't think twice: do it and say N here to save some memory.
 
-         NOTE: access to this feature is limited to processes with the
-         CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms
-         where it interacts badly with X.org.
-
-         If you are not sure, say Y here but do try to check if you could
-         be using X.org RandR instead.
+         If you are not sure, say Y here.
 
 config THINKPAD_ACPI_HOTKEY_POLL
        bool "Support NVRAM polling for hot keys"
index c533b1c6556c9565fa50ca84e7942af6439ac794..4226e535273874fb06aea0344c4c8ce00a8b957c 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/rfkill.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
-#include <linux/dmi.h>
 
 #define EEEPC_LAPTOP_VERSION   "0.1"
 
@@ -136,8 +135,6 @@ struct eeepc_hotk {
        acpi_handle handle;             /* the handle of the hotk device */
        u32 cm_supported;               /* the control methods supported
                                           by this BIOS */
-       bool cpufv_disabled;
-       bool hotplug_disabled;
        uint init_flag;                 /* Init flags */
        u16 event_count[128];           /* count for each event */
        struct input_dev *inputdev;
@@ -254,14 +251,6 @@ MODULE_AUTHOR("Corentin Chary, Eric Cooper");
 MODULE_DESCRIPTION(EEEPC_HOTK_NAME);
 MODULE_LICENSE("GPL");
 
-static bool hotplug_disabled;
-
-module_param(hotplug_disabled, bool, 0644);
-MODULE_PARM_DESC(hotplug_disabled,
-                "Disable hotplug for wireless device. "
-                "If your laptop need that, please report to "
-                "acpi4asus-user@lists.sourceforge.net.");
-
 /*
  * ACPI Helpers
  */
@@ -478,8 +467,6 @@ static ssize_t store_cpufv(struct device *dev,
        struct eeepc_cpufv c;
        int rv, value;
 
-       if (ehotk->cpufv_disabled)
-               return -EPERM;
        if (get_cpufv(&c))
                return -ENODEV;
        rv = parse_arg(buf, count, &value);
@@ -491,38 +478,6 @@ static ssize_t store_cpufv(struct device *dev,
        return rv;
 }
 
-static ssize_t show_cpufv_disabled(struct device *dev,
-                         struct device_attribute *attr,
-                         char *buf)
-{
-       return sprintf(buf, "%d\n", ehotk->cpufv_disabled);
-}
-
-static ssize_t store_cpufv_disabled(struct device *dev,
-                          struct device_attribute *attr,
-                          const char *buf, size_t count)
-{
-       int rv, value;
-
-       rv = parse_arg(buf, count, &value);
-       if (rv < 0)
-               return rv;
-
-       switch (value) {
-       case 0:
-               if (ehotk->cpufv_disabled)
-                       pr_warning("cpufv enabled (not officially supported "
-                               "on this model)\n");
-               ehotk->cpufv_disabled = false;
-               return rv;
-       case 1:
-               return -EPERM;
-       default:
-               return -EINVAL;
-       }
-}
-
-
 static struct device_attribute dev_attr_cpufv = {
        .attr = {
                .name = "cpufv",
@@ -538,22 +493,12 @@ static struct device_attribute dev_attr_available_cpufv = {
        .show   = show_available_cpufv
 };
 
-static struct device_attribute dev_attr_cpufv_disabled = {
-       .attr = {
-               .name = "cpufv_disabled",
-               .mode = 0644 },
-       .show   = show_cpufv_disabled,
-       .store  = store_cpufv_disabled
-};
-
-
 static struct attribute *platform_attributes[] = {
        &dev_attr_camera.attr,
        &dev_attr_cardr.attr,
        &dev_attr_disp.attr,
        &dev_attr_cpufv.attr,
        &dev_attr_available_cpufv.attr,
-       &dev_attr_cpufv_disabled.attr,
        NULL
 };
 
@@ -619,54 +564,6 @@ static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
        return -EINVAL;
 }
 
-static void eeepc_dmi_check(void)
-{
-       const char *model;
-
-       model = dmi_get_system_info(DMI_PRODUCT_NAME);
-       if (!model)
-               return;
-
-       /*
-        * Blacklist for setting cpufv (cpu speed).
-        *
-        * EeePC 4G ("701") implements CFVS, but it is not supported
-        * by the pre-installed OS, and the original option to change it
-        * in the BIOS setup screen was removed in later versions.
-        *
-        * Judging by the lack of "Super Hybrid Engine" on Asus product pages,
-        * this applies to all "701" models (4G/4G Surf/2G Surf).
-        *
-        * So Asus made a deliberate decision not to support it on this model.
-        * We have several reports that using it can cause the system to hang
-        *
-        * The hang has also been reported on a "702" (Model name "8G"?).
-        *
-        * We avoid dmi_check_system() / dmi_match(), because they use
-        * substring matching.  We don't want to affect the "701SD"
-        * and "701SDX" models, because they do support S.H.E.
-        */
-       if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
-               ehotk->cpufv_disabled = true;
-               pr_info("model %s does not officially support setting cpu "
-                       "speed\n", model);
-               pr_info("cpufv disabled to avoid instability\n");
-       }
-
-       /*
-        * Blacklist for wlan hotplug
-        *
-        * Eeepc 1005HA doesn't work like others models and don't need the
-        * hotplug code. In fact, current hotplug code seems to unplug another
-        * device...
-        */
-       if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
-           strcmp(model, "1005PE") == 0) {
-               ehotk->hotplug_disabled = true;
-               pr_info("wlan hotplug disabled\n");
-       }
-}
-
 static void cmsg_quirk(int cm, const char *name)
 {
        int dummy;
@@ -752,8 +649,6 @@ static void eeepc_rfkill_hotplug(void)
        struct pci_dev *dev;
        struct pci_bus *bus;
        bool blocked = eeepc_wlan_rfkill_blocked();
-       bool absent;
-       u32 l;
 
        if (ehotk->wlan_rfkill)
                rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
@@ -767,22 +662,6 @@ static void eeepc_rfkill_hotplug(void)
                        goto out_unlock;
                }
 
-               if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
-                       pr_err("Unable to read PCI config space?\n");
-                       goto out_unlock;
-               }
-               absent = (l == 0xffffffff);
-
-               if (blocked != absent) {
-                       pr_warning("BIOS says wireless lan is %s, "
-                                       "but the pci device is %s\n",
-                               blocked ? "blocked" : "unblocked",
-                               absent ? "absent" : "present");
-                       pr_warning("skipped wireless hotplug as probably "
-                                       "inappropriate for this model\n");
-                       goto out_unlock;
-               }
-
                if (!blocked) {
                        dev = pci_get_slot(bus, 0);
                        if (dev) {
@@ -1216,9 +1095,6 @@ static int eeepc_rfkill_init(struct device *dev)
        if (result && result != -ENODEV)
                goto exit;
 
-       if (ehotk->hotplug_disabled)
-               return 0;
-
        result = eeepc_setup_pci_hotplug();
        /*
         * If we get -EBUSY then something else is handling the PCI hotplug -
@@ -1332,10 +1208,6 @@ static int __devinit eeepc_hotk_add(struct acpi_device *device)
        device->driver_data = ehotk;
        ehotk->device = device;
 
-       ehotk->hotplug_disabled = hotplug_disabled;
-
-       eeepc_dmi_check();
-
        result = eeepc_hotk_check();
        if (result)
                goto fail_platform_driver;
index 7e51d5be8cc07fd65c7e5df66992b156c5b5d6f7..1ee734c14cc11cc406209dde4fd3350765ed48f2 100644 (file)
@@ -22,7 +22,7 @@
  */
 
 #define TPACPI_VERSION "0.23"
-#define TPACPI_SYSFS_VERSION 0x020600
+#define TPACPI_SYSFS_VERSION 0x020500
 
 /*
  *  Changelog:
@@ -61,7 +61,6 @@
 
 #include <linux/nvram.h>
 #include <linux/proc_fs.h>
-#include <linux/seq_file.h>
 #include <linux/sysfs.h>
 #include <linux/backlight.h>
 #include <linux/fb.h>
@@ -257,7 +256,7 @@ struct tp_acpi_drv_struct {
 struct ibm_struct {
        char *name;
 
-       int (*read) (struct seq_file *);
+       int (*read) (char *);
        int (*write) (char *);
        void (*exit) (void);
        void (*resume) (void);
@@ -281,7 +280,6 @@ struct ibm_init_struct {
        char param[32];
 
        int (*init) (struct ibm_init_struct *);
-       mode_t base_procfs_mode;
        struct ibm_struct *data;
 };
 
@@ -778,25 +776,36 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
  ****************************************************************************
  ****************************************************************************/
 
-static int dispatch_proc_show(struct seq_file *m, void *v)
+static int dispatch_procfs_read(char *page, char **start, off_t off,
+                       int count, int *eof, void *data)
 {
-       struct ibm_struct *ibm = m->private;
+       struct ibm_struct *ibm = data;
+       int len;
 
        if (!ibm || !ibm->read)
                return -EINVAL;
-       return ibm->read(m);
-}
 
-static int dispatch_proc_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, dispatch_proc_show, PDE(inode)->data);
+       len = ibm->read(page);
+       if (len < 0)
+               return len;
+
+       if (len <= off + count)
+               *eof = 1;
+       *start = page + off;
+       len -= off;
+       if (len > count)
+               len = count;
+       if (len < 0)
+               len = 0;
+
+       return len;
 }
 
-static ssize_t dispatch_proc_write(struct file *file,
+static int dispatch_procfs_write(struct file *file,
                        const char __user *userbuf,
-                       size_t count, loff_t *pos)
+                       unsigned long count, void *data)
 {
-       struct ibm_struct *ibm = PDE(file->f_path.dentry->d_inode)->data;
+       struct ibm_struct *ibm = data;
        char *kernbuf;
        int ret;
 
@@ -825,15 +834,6 @@ static ssize_t dispatch_proc_write(struct file *file,
        return ret;
 }
 
-static const struct file_operations dispatch_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = dispatch_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-       .write          = dispatch_proc_write,
-};
-
 static char *next_cmd(char **cmds)
 {
        char *start = *cmds;
@@ -1264,7 +1264,6 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
        struct tpacpi_rfk *atp_rfk;
        int res;
        bool sw_state = false;
-       bool hw_state;
        int sw_status;
 
        BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
@@ -1299,8 +1298,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
                        rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
                }
        }
-       hw_state = tpacpi_rfk_check_hwblock_state();
-       rfkill_set_hw_state(atp_rfk->rfkill, hw_state);
+       rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
 
        res = rfkill_register(atp_rfk->rfkill);
        if (res < 0) {
@@ -1313,9 +1311,6 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
        }
 
        tpacpi_rfkill_switches[id] = atp_rfk;
-
-       printk(TPACPI_INFO "rfkill switch %s: radio is %sblocked\n",
-               name, (sw_state || hw_state) ? "" : "un");
        return 0;
 }
 
@@ -1388,11 +1383,12 @@ static ssize_t tpacpi_rfk_sysfs_enable_store(const enum tpacpi_rfk_id id,
 }
 
 /* procfs -------------------------------------------------------------- */
-static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id,
-                                 struct seq_file *m)
+static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, char *p)
 {
+       int len = 0;
+
        if (id >= TPACPI_RFK_SW_MAX)
-               seq_printf(m, "status:\t\tnot supported\n");
+               len += sprintf(p + len, "status:\t\tnot supported\n");
        else {
                int status;
 
@@ -1406,13 +1402,13 @@ static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id,
                                return status;
                }
 
-               seq_printf(m, "status:\t\t%s\n",
+               len += sprintf(p + len, "status:\t\t%s\n",
                                (status == TPACPI_RFK_RADIO_ON) ?
                                        "enabled" : "disabled");
-               seq_printf(m, "commands:\tenable, disable\n");
+               len += sprintf(p + len, "commands:\tenable, disable\n");
        }
 
-       return 0;
+       return len;
 }
 
 static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
@@ -1783,7 +1779,7 @@ static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
 
        TPV_QL1('7', '9',  'E', '3',  '5', '0'), /* T60/p */
        TPV_QL1('7', 'C',  'D', '2',  '2', '2'), /* R60, R60i */
-       TPV_QL1('7', 'E',  'D', '0',  '1', '5'), /* R60e, R60i */
+       TPV_QL0('7', 'E',  'D', '0'),            /* R60e, R60i */
 
        /*      BIOS FW    BIOS VERS  EC FW     EC VERS */
        TPV_QI2('1', 'W',  '9', '0',  '1', 'V', '2', '8'), /* R50e (1) */
@@ -1799,8 +1795,8 @@ static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
        TPV_QI1('7', '4',  '6', '4',  '2', '7'), /* X41 (0) */
        TPV_QI1('7', '5',  '6', '0',  '2', '0'), /* X41t (0) */
 
-       TPV_QL1('7', 'B',  'D', '7',  '4', '0'), /* X60/s */
-       TPV_QL1('7', 'J',  '3', '0',  '1', '3'), /* X60t */
+       TPV_QL0('7', 'B',  'D', '7'),            /* X60/s */
+       TPV_QL0('7', 'J',  '3', '0'),            /* X60t */
 
        /* (0) - older versions lack DMI EC fw string and functionality */
        /* (1) - older versions known to lack functionality */
@@ -1890,11 +1886,14 @@ static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
        return 0;
 }
 
-static int thinkpad_acpi_driver_read(struct seq_file *m)
+static int thinkpad_acpi_driver_read(char *p)
 {
-       seq_printf(m, "driver:\t\t%s\n", TPACPI_DESC);
-       seq_printf(m, "version:\t%s\n", TPACPI_VERSION);
-       return 0;
+       int len = 0;
+
+       len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC);
+       len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION);
+
+       return len;
 }
 
 static struct ibm_struct thinkpad_acpi_driver_data = {
@@ -2074,7 +2073,6 @@ static struct attribute_set *hotkey_dev_attributes;
 
 static void tpacpi_driver_event(const unsigned int hkey_event);
 static void hotkey_driver_event(const unsigned int scancode);
-static void hotkey_poll_setup(const bool may_warn);
 
 /* HKEY.MHKG() return bits */
 #define TP_HOTKEY_TABLET_MASK (1 << 3)
@@ -2191,8 +2189,7 @@ static int hotkey_mask_set(u32 mask)
                       fwmask, hotkey_acpi_mask);
        }
 
-       if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
-               hotkey_mask_warn_incomplete_mask();
+       hotkey_mask_warn_incomplete_mask();
 
        return rc;
 }
@@ -2257,8 +2254,6 @@ static int tpacpi_hotkey_driver_mask_set(const u32 mask)
 
        rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
                                                        ~hotkey_source_mask);
-       hotkey_poll_setup(true);
-
        mutex_unlock(&hotkey_mutex);
 
        return rc;
@@ -2543,7 +2538,7 @@ static void hotkey_poll_stop_sync(void)
 }
 
 /* call with hotkey_mutex held */
-static void hotkey_poll_setup(const bool may_warn)
+static void hotkey_poll_setup(bool may_warn)
 {
        const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
        const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
@@ -2574,7 +2569,7 @@ static void hotkey_poll_setup(const bool may_warn)
        }
 }
 
-static void hotkey_poll_setup_safe(const bool may_warn)
+static void hotkey_poll_setup_safe(bool may_warn)
 {
        mutex_lock(&hotkey_mutex);
        hotkey_poll_setup(may_warn);
@@ -2592,11 +2587,7 @@ static void hotkey_poll_set_freq(unsigned int freq)
 
 #else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
 
-static void hotkey_poll_setup(const bool __unused)
-{
-}
-
-static void hotkey_poll_setup_safe(const bool __unused)
+static void hotkey_poll_setup_safe(bool __unused)
 {
 }
 
@@ -2606,11 +2597,16 @@ static int hotkey_inputdev_open(struct input_dev *dev)
 {
        switch (tpacpi_lifecycle) {
        case TPACPI_LIFE_INIT:
-       case TPACPI_LIFE_RUNNING:
-               hotkey_poll_setup_safe(false);
+               /*
+                * hotkey_init will call hotkey_poll_setup_safe
+                * at the appropriate moment
+                */
                return 0;
        case TPACPI_LIFE_EXITING:
                return -EBUSY;
+       case TPACPI_LIFE_RUNNING:
+               hotkey_poll_setup_safe(false);
+               return 0;
        }
 
        /* Should only happen if tpacpi_lifecycle is corrupt */
@@ -2621,7 +2617,7 @@ static int hotkey_inputdev_open(struct input_dev *dev)
 static void hotkey_inputdev_close(struct input_dev *dev)
 {
        /* disable hotkey polling when possible */
-       if (tpacpi_lifecycle != TPACPI_LIFE_EXITING &&
+       if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
            !(hotkey_source_mask & hotkey_driver_mask))
                hotkey_poll_setup_safe(false);
 }
@@ -3189,8 +3185,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        int res, i;
        int status;
        int hkeyv;
-       bool radiosw_state  = false;
-       bool tabletsw_state = false;
 
        unsigned long quirks;
 
@@ -3296,7 +3290,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
 #ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
        if (dbg_wlswemul) {
                tp_features.hotkey_wlsw = 1;
-               radiosw_state = !!tpacpi_wlsw_emulstate;
                printk(TPACPI_INFO
                        "radio switch emulation enabled\n");
        } else
@@ -3304,7 +3297,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        /* Not all thinkpads have a hardware radio switch */
        if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
                tp_features.hotkey_wlsw = 1;
-               radiosw_state = !!status;
                printk(TPACPI_INFO
                        "radio switch found; radios are %s\n",
                        enabled(status, 0));
@@ -3316,11 +3308,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        /* For X41t, X60t, X61t Tablets... */
        if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
                tp_features.hotkey_tablet = 1;
-               tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
                printk(TPACPI_INFO
                        "possible tablet mode switch found; "
                        "ThinkPad in %s mode\n",
-                       (tabletsw_state) ? "tablet" : "laptop");
+                       (status & TP_HOTKEY_TABLET_MASK)?
+                               "tablet" : "laptop");
                res = add_to_attr_set(hotkey_dev_attributes,
                                &dev_attr_hotkey_tablet_mode.attr);
        }
@@ -3355,14 +3347,16 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
                        TPACPI_HOTKEY_MAP_SIZE);
        }
 
-       input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
+       set_bit(EV_KEY, tpacpi_inputdev->evbit);
+       set_bit(EV_MSC, tpacpi_inputdev->evbit);
+       set_bit(MSC_SCAN, tpacpi_inputdev->mscbit);
        tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
        tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
        tpacpi_inputdev->keycode = hotkey_keycode_map;
        for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
                if (hotkey_keycode_map[i] != KEY_RESERVED) {
-                       input_set_capability(tpacpi_inputdev, EV_KEY,
-                                               hotkey_keycode_map[i]);
+                       set_bit(hotkey_keycode_map[i],
+                               tpacpi_inputdev->keybit);
                } else {
                        if (i < sizeof(hotkey_reserved_mask)*8)
                                hotkey_reserved_mask |= 1 << i;
@@ -3370,14 +3364,12 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        }
 
        if (tp_features.hotkey_wlsw) {
-               input_set_capability(tpacpi_inputdev, EV_SW, SW_RFKILL_ALL);
-               input_report_switch(tpacpi_inputdev,
-                                   SW_RFKILL_ALL, radiosw_state);
+               set_bit(EV_SW, tpacpi_inputdev->evbit);
+               set_bit(SW_RFKILL_ALL, tpacpi_inputdev->swbit);
        }
        if (tp_features.hotkey_tablet) {
-               input_set_capability(tpacpi_inputdev, EV_SW, SW_TABLET_MODE);
-               input_report_switch(tpacpi_inputdev,
-                                   SW_TABLET_MODE, tabletsw_state);
+               set_bit(EV_SW, tpacpi_inputdev->evbit);
+               set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
        }
 
        /* Do not issue duplicate brightness change events to
@@ -3444,6 +3436,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        tpacpi_inputdev->close = &hotkey_inputdev_close;
 
        hotkey_poll_setup_safe(true);
+       tpacpi_send_radiosw_update();
+       tpacpi_input_send_tabletsw();
 
        return 0;
 
@@ -3551,57 +3545,49 @@ static bool hotkey_notify_usrevent(const u32 hkey,
        }
 }
 
-static void thermal_dump_all_sensors(void);
-
 static bool hotkey_notify_thermal(const u32 hkey,
                                 bool *send_acpi_ev,
                                 bool *ignore_acpi_ev)
 {
-       bool known = true;
-
        /* 0x6000-0x6FFF: thermal alarms */
        *send_acpi_ev = true;
        *ignore_acpi_ev = false;
 
        switch (hkey) {
-       case TP_HKEY_EV_THM_TABLE_CHANGED:
-               printk(TPACPI_INFO
-                       "EC reports that Thermal Table has changed\n");
-               /* recommended action: do nothing, we don't have
-                * Lenovo ATM information */
-               return true;
        case TP_HKEY_EV_ALARM_BAT_HOT:
                printk(TPACPI_CRIT
                        "THERMAL ALARM: battery is too hot!\n");
                /* recommended action: warn user through gui */
-               break;
+               return true;
        case TP_HKEY_EV_ALARM_BAT_XHOT:
                printk(TPACPI_ALERT
                        "THERMAL EMERGENCY: battery is extremely hot!\n");
                /* recommended action: immediate sleep/hibernate */
-               break;
+               return true;
        case TP_HKEY_EV_ALARM_SENSOR_HOT:
                printk(TPACPI_CRIT
                        "THERMAL ALARM: "
                        "a sensor reports something is too hot!\n");
                /* recommended action: warn user through gui, that */
                /* some internal component is too hot */
-               break;
+               return true;
        case TP_HKEY_EV_ALARM_SENSOR_XHOT:
                printk(TPACPI_ALERT
                        "THERMAL EMERGENCY: "
                        "a sensor reports something is extremely hot!\n");
                /* recommended action: immediate sleep/hibernate */
-               break;
+               return true;
+       case TP_HKEY_EV_THM_TABLE_CHANGED:
+               printk(TPACPI_INFO
+                       "EC reports that Thermal Table has changed\n");
+               /* recommended action: do nothing, we don't have
+                * Lenovo ATM information */
+               return true;
        default:
                printk(TPACPI_ALERT
                         "THERMAL ALERT: unknown thermal alarm received\n");
-               known = false;
+               return false;
        }
-
-       thermal_dump_all_sensors();
-
-       return known;
 }
 
 static void hotkey_notify(struct ibm_struct *ibm, u32 event)
@@ -3649,19 +3635,13 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
                        break;
                case 3:
                        /* 0x3000-0x3FFF: bay-related wakeups */
-                       switch (hkey) {
-                       case TP_HKEY_EV_BAYEJ_ACK:
+                       if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
                                hotkey_autosleep_ack = 1;
                                printk(TPACPI_INFO
                                       "bay ejected\n");
                                hotkey_wakeup_hotunplug_complete_notify_change();
                                known_ev = true;
-                               break;
-                       case TP_HKEY_EV_OPTDRV_EJ:
-                               /* FIXME: kick libata if SATA link offline */
-                               known_ev = true;
-                               break;
-                       default:
+                       } else {
                                known_ev = false;
                        }
                        break;
@@ -3750,13 +3730,14 @@ static void hotkey_resume(void)
 }
 
 /* procfs -------------------------------------------------------------- */
-static int hotkey_read(struct seq_file *m)
+static int hotkey_read(char *p)
 {
        int res, status;
+       int len = 0;
 
        if (!tp_features.hotkey) {
-               seq_printf(m, "status:\t\tnot supported\n");
-               return 0;
+               len += sprintf(p + len, "status:\t\tnot supported\n");
+               return len;
        }
 
        if (mutex_lock_killable(&hotkey_mutex))
@@ -3768,16 +3749,17 @@ static int hotkey_read(struct seq_file *m)
        if (res)
                return res;
 
-       seq_printf(m, "status:\t\t%s\n", enabled(status, 0));
+       len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
        if (hotkey_all_mask) {
-               seq_printf(m, "mask:\t\t0x%08x\n", hotkey_user_mask);
-               seq_printf(m, "commands:\tenable, disable, reset, <mask>\n");
+               len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
+               len += sprintf(p + len,
+                              "commands:\tenable, disable, reset, <mask>\n");
        } else {
-               seq_printf(m, "mask:\t\tnot supported\n");
-               seq_printf(m, "commands:\tenable, disable, reset\n");
+               len += sprintf(p + len, "mask:\t\tnot supported\n");
+               len += sprintf(p + len, "commands:\tenable, disable, reset\n");
        }
 
-       return 0;
+       return len;
 }
 
 static void hotkey_enabledisable_warn(bool enable)
@@ -3870,7 +3852,7 @@ enum {
        TP_ACPI_BLUETOOTH_HWPRESENT     = 0x01, /* Bluetooth hw available */
        TP_ACPI_BLUETOOTH_RADIOSSW      = 0x02, /* Bluetooth radio enabled */
        TP_ACPI_BLUETOOTH_RESUMECTRL    = 0x04, /* Bluetooth state at resume:
-                                                  0 = disable, 1 = enable */
+                                                  off / last state */
 };
 
 enum {
@@ -3916,11 +3898,10 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
        }
 #endif
 
+       /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
+       status = TP_ACPI_BLUETOOTH_RESUMECTRL;
        if (state == TPACPI_RFK_RADIO_ON)
-               status = TP_ACPI_BLUETOOTH_RADIOSSW
-                         | TP_ACPI_BLUETOOTH_RESUMECTRL;
-       else
-               status = 0;
+               status |= TP_ACPI_BLUETOOTH_RADIOSSW;
 
        if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
                return -EIO;
@@ -4044,9 +4025,9 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
 }
 
 /* procfs -------------------------------------------------------------- */
-static int bluetooth_read(struct seq_file *m)
+static int bluetooth_read(char *p)
 {
-       return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, m);
+       return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, p);
 }
 
 static int bluetooth_write(char *buf)
@@ -4071,7 +4052,7 @@ enum {
        TP_ACPI_WANCARD_HWPRESENT       = 0x01, /* Wan hw available */
        TP_ACPI_WANCARD_RADIOSSW        = 0x02, /* Wan radio enabled */
        TP_ACPI_WANCARD_RESUMECTRL      = 0x04, /* Wan state at resume:
-                                                  0 = disable, 1 = enable */
+                                                  off / last state */
 };
 
 #define TPACPI_RFK_WWAN_SW_NAME                "tpacpi_wwan_sw"
@@ -4108,11 +4089,10 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
        }
 #endif
 
+       /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
+       status = TP_ACPI_WANCARD_RESUMECTRL;
        if (state == TPACPI_RFK_RADIO_ON)
-               status = TP_ACPI_WANCARD_RADIOSSW
-                        | TP_ACPI_WANCARD_RESUMECTRL;
-       else
-               status = 0;
+               status |= TP_ACPI_WANCARD_RADIOSSW;
 
        if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
                return -EIO;
@@ -4235,9 +4215,9 @@ static int __init wan_init(struct ibm_init_struct *iibm)
 }
 
 /* procfs -------------------------------------------------------------- */
-static int wan_read(struct seq_file *m)
+static int wan_read(char *p)
 {
-       return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, m);
+       return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, p);
 }
 
 static int wan_write(char *buf)
@@ -4612,19 +4592,16 @@ static int video_expand_toggle(void)
        /* not reached */
 }
 
-static int video_read(struct seq_file *m)
+static int video_read(char *p)
 {
        int status, autosw;
+       int len = 0;
 
        if (video_supported == TPACPI_VIDEO_NONE) {
-               seq_printf(m, "status:\t\tnot supported\n");
-               return 0;
+               len += sprintf(p + len, "status:\t\tnot supported\n");
+               return len;
        }
 
-       /* Even reads can crash X.org, so... */
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
        status = video_outputsw_get();
        if (status < 0)
                return status;
@@ -4633,20 +4610,20 @@ static int video_read(struct seq_file *m)
        if (autosw < 0)
                return autosw;
 
-       seq_printf(m, "status:\t\tsupported\n");
-       seq_printf(m, "lcd:\t\t%s\n", enabled(status, 0));
-       seq_printf(m, "crt:\t\t%s\n", enabled(status, 1));
+       len += sprintf(p + len, "status:\t\tsupported\n");
+       len += sprintf(p + len, "lcd:\t\t%s\n", enabled(status, 0));
+       len += sprintf(p + len, "crt:\t\t%s\n", enabled(status, 1));
        if (video_supported == TPACPI_VIDEO_NEW)
-               seq_printf(m, "dvi:\t\t%s\n", enabled(status, 3));
-       seq_printf(m, "auto:\t\t%s\n", enabled(autosw, 0));
-       seq_printf(m, "commands:\tlcd_enable, lcd_disable\n");
-       seq_printf(m, "commands:\tcrt_enable, crt_disable\n");
+               len += sprintf(p + len, "dvi:\t\t%s\n", enabled(status, 3));
+       len += sprintf(p + len, "auto:\t\t%s\n", enabled(autosw, 0));
+       len += sprintf(p + len, "commands:\tlcd_enable, lcd_disable\n");
+       len += sprintf(p + len, "commands:\tcrt_enable, crt_disable\n");
        if (video_supported == TPACPI_VIDEO_NEW)
-               seq_printf(m, "commands:\tdvi_enable, dvi_disable\n");
-       seq_printf(m, "commands:\tauto_enable, auto_disable\n");
-       seq_printf(m, "commands:\tvideo_switch, expand_toggle\n");
+               len += sprintf(p + len, "commands:\tdvi_enable, dvi_disable\n");
+       len += sprintf(p + len, "commands:\tauto_enable, auto_disable\n");
+       len += sprintf(p + len, "commands:\tvideo_switch, expand_toggle\n");
 
-       return 0;
+       return len;
 }
 
 static int video_write(char *buf)
@@ -4658,10 +4635,6 @@ static int video_write(char *buf)
        if (video_supported == TPACPI_VIDEO_NONE)
                return -ENODEV;
 
-       /* Even reads can crash X.org, let alone writes... */
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
        enable = 0;
        disable = 0;
 
@@ -4842,24 +4815,25 @@ static void light_exit(void)
                flush_workqueue(tpacpi_wq);
 }
 
-static int light_read(struct seq_file *m)
+static int light_read(char *p)
 {
+       int len = 0;
        int status;
 
        if (!tp_features.light) {
-               seq_printf(m, "status:\t\tnot supported\n");
+               len += sprintf(p + len, "status:\t\tnot supported\n");
        } else if (!tp_features.light_status) {
-               seq_printf(m, "status:\t\tunknown\n");
-               seq_printf(m, "commands:\ton, off\n");
+               len += sprintf(p + len, "status:\t\tunknown\n");
+               len += sprintf(p + len, "commands:\ton, off\n");
        } else {
                status = light_get_status();
                if (status < 0)
                        return status;
-               seq_printf(m, "status:\t\t%s\n", onoff(status, 0));
-               seq_printf(m, "commands:\ton, off\n");
+               len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0));
+               len += sprintf(p + len, "commands:\ton, off\n");
        }
 
-       return 0;
+       return len;
 }
 
 static int light_write(char *buf)
@@ -4937,18 +4911,20 @@ static void cmos_exit(void)
        device_remove_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
 }
 
-static int cmos_read(struct seq_file *m)
+static int cmos_read(char *p)
 {
+       int len = 0;
+
        /* cmos not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
           R30, R31, T20-22, X20-21 */
        if (!cmos_handle)
-               seq_printf(m, "status:\t\tnot supported\n");
+               len += sprintf(p + len, "status:\t\tnot supported\n");
        else {
-               seq_printf(m, "status:\t\tsupported\n");
-               seq_printf(m, "commands:\t<cmd> (<cmd> is 0-21)\n");
+               len += sprintf(p + len, "status:\t\tsupported\n");
+               len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-21)\n");
        }
 
-       return 0;
+       return len;
 }
 
 static int cmos_write(char *buf)
@@ -5323,13 +5299,15 @@ static int __init led_init(struct ibm_init_struct *iibm)
        ((s) == TPACPI_LED_OFF ? "off" : \
                ((s) == TPACPI_LED_ON ? "on" : "blinking"))
 
-static int led_read(struct seq_file *m)
+static int led_read(char *p)
 {
+       int len = 0;
+
        if (!led_supported) {
-               seq_printf(m, "status:\t\tnot supported\n");
-               return 0;
+               len += sprintf(p + len, "status:\t\tnot supported\n");
+               return len;
        }
-       seq_printf(m, "status:\t\tsupported\n");
+       len += sprintf(p + len, "status:\t\tsupported\n");
 
        if (led_supported == TPACPI_LED_570) {
                /* 570 */
@@ -5338,15 +5316,15 @@ static int led_read(struct seq_file *m)
                        status = led_get_status(i);
                        if (status < 0)
                                return -EIO;
-                       seq_printf(m, "%d:\t\t%s\n",
+                       len += sprintf(p + len, "%d:\t\t%s\n",
                                       i, str_led_status(status));
                }
        }
 
-       seq_printf(m, "commands:\t"
+       len += sprintf(p + len, "commands:\t"
                       "<led> on, <led> off, <led> blink (<led> is 0-15)\n");
 
-       return 0;
+       return len;
 }
 
 static int led_write(char *buf)
@@ -5419,16 +5397,18 @@ static int __init beep_init(struct ibm_init_struct *iibm)
        return (beep_handle)? 0 : 1;
 }
 
-static int beep_read(struct seq_file *m)
+static int beep_read(char *p)
 {
+       int len = 0;
+
        if (!beep_handle)
-               seq_printf(m, "status:\t\tnot supported\n");
+               len += sprintf(p + len, "status:\t\tnot supported\n");
        else {
-               seq_printf(m, "status:\t\tsupported\n");
-               seq_printf(m, "commands:\t<cmd> (<cmd> is 0-17)\n");
+               len += sprintf(p + len, "status:\t\tsupported\n");
+               len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-17)\n");
        }
 
-       return 0;
+       return len;
 }
 
 static int beep_write(char *buf)
@@ -5481,11 +5461,8 @@ enum { /* TPACPI_THERMAL_TPEC_* */
        TP_EC_THERMAL_TMP0 = 0x78,      /* ACPI EC regs TMP 0..7 */
        TP_EC_THERMAL_TMP8 = 0xC0,      /* ACPI EC regs TMP 8..15 */
        TP_EC_THERMAL_TMP_NA = -128,    /* ACPI EC sensor not available */
-
-       TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
 };
 
-
 #define TPACPI_MAX_THERMAL_SENSORS 16  /* Max thermal sensors supported */
 struct ibm_thermal_sensors_struct {
        s32 temp[TPACPI_MAX_THERMAL_SENSORS];
@@ -5575,28 +5552,6 @@ static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
        return n;
 }
 
-static void thermal_dump_all_sensors(void)
-{
-       int n, i;
-       struct ibm_thermal_sensors_struct t;
-
-       n = thermal_get_sensors(&t);
-       if (n <= 0)
-               return;
-
-       printk(TPACPI_NOTICE
-               "temperatures (Celsius):");
-
-       for (i = 0; i < n; i++) {
-               if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
-                       printk(KERN_CONT " %d", (int)(t.temp[i] / 1000));
-               else
-                       printk(KERN_CONT " N/A");
-       }
-
-       printk(KERN_CONT "\n");
-}
-
 /* sysfs temp##_input -------------------------------------------------- */
 
 static ssize_t thermal_temp_input_show(struct device *dev,
@@ -5612,7 +5567,7 @@ static ssize_t thermal_temp_input_show(struct device *dev,
        res = thermal_get_sensor(idx, &value);
        if (res)
                return res;
-       if (value == TPACPI_THERMAL_SENSOR_NA)
+       if (value == TP_EC_THERMAL_TMP_NA * 1000)
                return -ENXIO;
 
        return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -5781,7 +5736,7 @@ static void thermal_exit(void)
        case TPACPI_THERMAL_ACPI_TMP07:
        case TPACPI_THERMAL_ACPI_UPDT:
                sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
-                                  &thermal_temp_input8_group);
+                                  &thermal_temp_input16_group);
                break;
        case TPACPI_THERMAL_NONE:
        default:
@@ -5789,8 +5744,9 @@ static void thermal_exit(void)
        }
 }
 
-static int thermal_read(struct seq_file *m)
+static int thermal_read(char *p)
 {
+       int len = 0;
        int n, i;
        struct ibm_thermal_sensors_struct t;
 
@@ -5798,16 +5754,16 @@ static int thermal_read(struct seq_file *m)
        if (unlikely(n < 0))
                return n;
 
-       seq_printf(m, "temperatures:\t");
+       len += sprintf(p + len, "temperatures:\t");
 
        if (n > 0) {
                for (i = 0; i < (n - 1); i++)
-                       seq_printf(m, "%d ", t.temp[i] / 1000);
-               seq_printf(m, "%d\n", t.temp[i] / 1000);
+                       len += sprintf(p + len, "%d ", t.temp[i] / 1000);
+               len += sprintf(p + len, "%d\n", t.temp[i] / 1000);
        } else
-               seq_printf(m, "not supported\n");
+               len += sprintf(p + len, "not supported\n");
 
-       return 0;
+       return len;
 }
 
 static struct ibm_struct thermal_driver_data = {
@@ -5822,38 +5778,39 @@ static struct ibm_struct thermal_driver_data = {
 
 static u8 ecdump_regs[256];
 
-static int ecdump_read(struct seq_file *m)
+static int ecdump_read(char *p)
 {
+       int len = 0;
        int i, j;
        u8 v;
 
-       seq_printf(m, "EC      "
+       len += sprintf(p + len, "EC      "
                       " +00 +01 +02 +03 +04 +05 +06 +07"
                       " +08 +09 +0a +0b +0c +0d +0e +0f\n");
        for (i = 0; i < 256; i += 16) {
-               seq_printf(m, "EC 0x%02x:", i);
+               len += sprintf(p + len, "EC 0x%02x:", i);
                for (j = 0; j < 16; j++) {
                        if (!acpi_ec_read(i + j, &v))
                                break;
                        if (v != ecdump_regs[i + j])
-                               seq_printf(m, " *%02x", v);
+                               len += sprintf(p + len, " *%02x", v);
                        else
-                               seq_printf(m, "  %02x", v);
+                               len += sprintf(p + len, "  %02x", v);
                        ecdump_regs[i + j] = v;
                }
-               seq_putc(m, '\n');
+               len += sprintf(p + len, "\n");
                if (j != 16)
                        break;
        }
 
        /* These are way too dangerous to advertise openly... */
 #if 0
-       seq_printf(m, "commands:\t0x<offset> 0x<value>"
+       len += sprintf(p + len, "commands:\t0x<offset> 0x<value>"
                       " (<offset> is 00-ff, <value> is 00-ff)\n");
-       seq_printf(m, "commands:\t0x<offset> <value>  "
+       len += sprintf(p + len, "commands:\t0x<offset> <value>  "
                       " (<offset> is 00-ff, <value> is 0-255)\n");
 #endif
-       return 0;
+       return len;
 }
 
 static int ecdump_write(char *buf)
@@ -6116,12 +6073,6 @@ static int brightness_get(struct backlight_device *bd)
        return status & TP_EC_BACKLIGHT_LVLMSK;
 }
 
-static void tpacpi_brightness_notify_change(void)
-{
-       backlight_force_update(ibm_backlight_device,
-                              BACKLIGHT_UPDATE_HOTKEY);
-}
-
 static struct backlight_ops ibm_backlight_data = {
        .get_brightness = brightness_get,
        .update_status  = brightness_update_status,
@@ -6143,13 +6094,13 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
        TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC),      /* T43/p ATI */
 
        /* Models with ATI GPUs that can use ECNVRAM */
-       TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC),      /* R50,51 T40-42 */
+       TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC),
        TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
-       TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC),      /* R52 */
+       TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
        TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
 
        /* Models with Intel Extreme Graphics 2 */
-       TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),    /* X40 */
+       TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
        TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
        TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
 
@@ -6276,12 +6227,6 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
        ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
        backlight_update_status(ibm_backlight_device);
 
-       vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
-                       "brightness: registering brightness hotkeys "
-                       "as change notification\n");
-       tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
-                               | TP_ACPI_HKEY_BRGHTUP_MASK
-                               | TP_ACPI_HKEY_BRGHTDWN_MASK);;
        return 0;
 }
 
@@ -6306,22 +6251,23 @@ static void brightness_exit(void)
        tpacpi_brightness_checkpoint_nvram();
 }
 
-static int brightness_read(struct seq_file *m)
+static int brightness_read(char *p)
 {
+       int len = 0;
        int level;
 
        level = brightness_get(NULL);
        if (level < 0) {
-               seq_printf(m, "level:\t\tunreadable\n");
+               len += sprintf(p + len, "level:\t\tunreadable\n");
        } else {
-               seq_printf(m, "level:\t\t%d\n", level);
-               seq_printf(m, "commands:\tup, down\n");
-               seq_printf(m, "commands:\tlevel <level>"
+               len += sprintf(p + len, "level:\t\t%d\n", level);
+               len += sprintf(p + len, "commands:\tup, down\n");
+               len += sprintf(p + len, "commands:\tlevel <level>"
                               " (<level> is 0-%d)\n",
                               (tp_features.bright_16levels) ? 15 : 7);
        }
 
-       return 0;
+       return len;
 }
 
 static int brightness_write(char *buf)
@@ -6357,9 +6303,6 @@ static int brightness_write(char *buf)
         * Doing it this way makes the syscall restartable in case of EINTR
         */
        rc = brightness_set(level);
-       if (!rc && ibm_backlight_device)
-               backlight_force_update(ibm_backlight_device,
-                                       BACKLIGHT_UPDATE_SYSFS);
        return (rc == -EINTR)? -ERESTARTSYS : rc;
 }
 
@@ -6378,21 +6321,22 @@ static struct ibm_struct brightness_driver_data = {
 
 static int volume_offset = 0x30;
 
-static int volume_read(struct seq_file *m)
+static int volume_read(char *p)
 {
+       int len = 0;
        u8 level;
 
        if (!acpi_ec_read(volume_offset, &level)) {
-               seq_printf(m, "level:\t\tunreadable\n");
+               len += sprintf(p + len, "level:\t\tunreadable\n");
        } else {
-               seq_printf(m, "level:\t\t%d\n", level & 0xf);
-               seq_printf(m, "mute:\t\t%s\n", onoff(level, 6));
-               seq_printf(m, "commands:\tup, down, mute\n");
-               seq_printf(m, "commands:\tlevel <level>"
+               len += sprintf(p + len, "level:\t\t%d\n", level & 0xf);
+               len += sprintf(p + len, "mute:\t\t%s\n", onoff(level, 6));
+               len += sprintf(p + len, "commands:\tup, down, mute\n");
+               len += sprintf(p + len, "commands:\tlevel <level>"
                               " (<level> is 0-15)\n");
        }
 
-       return 0;
+       return len;
 }
 
 static int volume_write(char *buf)
@@ -7544,8 +7488,9 @@ static void fan_resume(void)
        }
 }
 
-static int fan_read(struct seq_file *m)
+static int fan_read(char *p)
 {
+       int len = 0;
        int rc;
        u8 status;
        unsigned int speed = 0;
@@ -7557,7 +7502,7 @@ static int fan_read(struct seq_file *m)
                if (rc < 0)
                        return rc;
 
-               seq_printf(m, "status:\t\t%s\n"
+               len += sprintf(p + len, "status:\t\t%s\n"
                               "level:\t\t%d\n",
                               (status != 0) ? "enabled" : "disabled", status);
                break;
@@ -7568,54 +7513,54 @@ static int fan_read(struct seq_file *m)
                if (rc < 0)
                        return rc;
 
-               seq_printf(m, "status:\t\t%s\n",
+               len += sprintf(p + len, "status:\t\t%s\n",
                               (status != 0) ? "enabled" : "disabled");
 
                rc = fan_get_speed(&speed);
                if (rc < 0)
                        return rc;
 
-               seq_printf(m, "speed:\t\t%d\n", speed);
+               len += sprintf(p + len, "speed:\t\t%d\n", speed);
 
                if (status & TP_EC_FAN_FULLSPEED)
                        /* Disengaged mode takes precedence */
-                       seq_printf(m, "level:\t\tdisengaged\n");
+                       len += sprintf(p + len, "level:\t\tdisengaged\n");
                else if (status & TP_EC_FAN_AUTO)
-                       seq_printf(m, "level:\t\tauto\n");
+                       len += sprintf(p + len, "level:\t\tauto\n");
                else
-                       seq_printf(m, "level:\t\t%d\n", status);
+                       len += sprintf(p + len, "level:\t\t%d\n", status);
                break;
 
        case TPACPI_FAN_NONE:
        default:
-               seq_printf(m, "status:\t\tnot supported\n");
+               len += sprintf(p + len, "status:\t\tnot supported\n");
        }
 
        if (fan_control_commands & TPACPI_FAN_CMD_LEVEL) {
-               seq_printf(m, "commands:\tlevel <level>");
+               len += sprintf(p + len, "commands:\tlevel <level>");
 
                switch (fan_control_access_mode) {
                case TPACPI_FAN_WR_ACPI_SFAN:
-                       seq_printf(m, " (<level> is 0-7)\n");
+                       len += sprintf(p + len, " (<level> is 0-7)\n");
                        break;
 
                default:
-                       seq_printf(m, " (<level> is 0-7, "
+                       len += sprintf(p + len, " (<level> is 0-7, "
                                       "auto, disengaged, full-speed)\n");
                        break;
                }
        }
 
        if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
-               seq_printf(m, "commands:\tenable, disable\n"
+               len += sprintf(p + len, "commands:\tenable, disable\n"
                               "commands:\twatchdog <timeout> (<timeout> "
                               "is 0 (off), 1-120 (seconds))\n");
 
        if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
-               seq_printf(m, "commands:\tspeed <speed>"
+               len += sprintf(p + len, "commands:\tspeed <speed>"
                               " (<speed> is 0-65535)\n");
 
-       return 0;
+       return len;
 }
 
 static int fan_write_cmd_level(const char *cmd, int *rc)
@@ -7757,13 +7702,6 @@ static struct ibm_struct fan_driver_data = {
  */
 static void tpacpi_driver_event(const unsigned int hkey_event)
 {
-       if (ibm_backlight_device) {
-               switch (hkey_event) {
-               case TP_HKEY_EV_BRGHT_UP:
-               case TP_HKEY_EV_BRGHT_DOWN:
-                       tpacpi_brightness_notify_change();
-               }
-       }
 }
 
 
@@ -7896,20 +7834,19 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
                "%s installed\n", ibm->name);
 
        if (ibm->read) {
-               mode_t mode = iibm->base_procfs_mode;
-
-               if (!mode)
-                       mode = S_IRUGO;
-               if (ibm->write)
-                       mode |= S_IWUSR;
-               entry = proc_create_data(ibm->name, mode, proc_dir,
-                                        &dispatch_proc_fops, ibm);
+               entry = create_proc_entry(ibm->name,
+                                         S_IFREG | S_IRUGO | S_IWUSR,
+                                         proc_dir);
                if (!entry) {
                        printk(TPACPI_ERR "unable to create proc entry %s\n",
                               ibm->name);
                        ret = -ENODEV;
                        goto err_out;
                }
+               entry->data = ibm;
+               entry->read_proc = &dispatch_procfs_read;
+               if (ibm->write)
+                       entry->write_proc = &dispatch_procfs_write;
                ibm->flags.proc_created = 1;
        }
 
@@ -8090,7 +8027,6 @@ static struct ibm_init_struct ibms_init[] __initdata = {
 #ifdef CONFIG_THINKPAD_ACPI_VIDEO
        {
                .init = video_init,
-               .base_procfs_mode = S_IRUSR,
                .data = &video_driver_data,
        },
 #endif
@@ -8157,32 +8093,32 @@ static int __init set_ibm_param(const char *val, struct kernel_param *kp)
        return -EINVAL;
 }
 
-module_param(experimental, int, 0444);
+module_param(experimental, int, 0);
 MODULE_PARM_DESC(experimental,
                 "Enables experimental features when non-zero");
 
 module_param_named(debug, dbg_level, uint, 0);
 MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
 
-module_param(force_load, bool, 0444);
+module_param(force_load, bool, 0);
 MODULE_PARM_DESC(force_load,
                 "Attempts to load the driver even on a "
                 "mis-identified ThinkPad when true");
 
-module_param_named(fan_control, fan_control_allowed, bool, 0444);
+module_param_named(fan_control, fan_control_allowed, bool, 0);
 MODULE_PARM_DESC(fan_control,
                 "Enables setting fan parameters features when true");
 
-module_param_named(brightness_mode, brightness_mode, uint, 0444);
+module_param_named(brightness_mode, brightness_mode, uint, 0);
 MODULE_PARM_DESC(brightness_mode,
                 "Selects brightness control strategy: "
                 "0=auto, 1=EC, 2=UCMS, 3=EC+NVRAM");
 
-module_param(brightness_enable, uint, 0444);
+module_param(brightness_enable, uint, 0);
 MODULE_PARM_DESC(brightness_enable,
                 "Enables backlight control when 1, disables when 0");
 
-module_param(hotkey_report_mode, uint, 0444);
+module_param(hotkey_report_mode, uint, 0);
 MODULE_PARM_DESC(hotkey_report_mode,
                 "used for backwards compatibility with userspace, "
                 "see documentation");
@@ -8205,25 +8141,25 @@ TPACPI_PARAM(volume);
 TPACPI_PARAM(fan);
 
 #ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
-module_param(dbg_wlswemul, uint, 0444);
+module_param(dbg_wlswemul, uint, 0);
 MODULE_PARM_DESC(dbg_wlswemul, "Enables WLSW emulation");
 module_param_named(wlsw_state, tpacpi_wlsw_emulstate, bool, 0);
 MODULE_PARM_DESC(wlsw_state,
                 "Initial state of the emulated WLSW switch");
 
-module_param(dbg_bluetoothemul, uint, 0444);
+module_param(dbg_bluetoothemul, uint, 0);
 MODULE_PARM_DESC(dbg_bluetoothemul, "Enables bluetooth switch emulation");
 module_param_named(bluetooth_state, tpacpi_bluetooth_emulstate, bool, 0);
 MODULE_PARM_DESC(bluetooth_state,
                 "Initial state of the emulated bluetooth switch");
 
-module_param(dbg_wwanemul, uint, 0444);
+module_param(dbg_wwanemul, uint, 0);
 MODULE_PARM_DESC(dbg_wwanemul, "Enables WWAN switch emulation");
 module_param_named(wwan_state, tpacpi_wwan_emulstate, bool, 0);
 MODULE_PARM_DESC(wwan_state,
                 "Initial state of the emulated WWAN switch");
 
-module_param(dbg_uwbemul, uint, 0444);
+module_param(dbg_uwbemul, uint, 0);
 MODULE_PARM_DESC(dbg_uwbemul, "Enables UWB switch emulation");
 module_param_named(uwb_state, tpacpi_uwb_emulstate, bool, 0);
 MODULE_PARM_DESC(uwb_state,
@@ -8416,7 +8352,6 @@ static int __init thinkpad_acpi_module_init(void)
                                                PCI_VENDOR_ID_IBM;
                tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
                tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
-               tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
        }
        for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
                ret = ibm_init(&ibms_init[i]);
@@ -8427,9 +8362,6 @@ static int __init thinkpad_acpi_module_init(void)
                        return ret;
                }
        }
-
-       tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
-
        ret = input_register_device(tpacpi_inputdev);
        if (ret < 0) {
                printk(TPACPI_ERR "unable to register input device\n");
@@ -8439,6 +8371,7 @@ static int __init thinkpad_acpi_module_init(void)
                tp_features.input_device_registered = 1;
        }
 
+       tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
        return 0;
 }
 
index dc628cb2e762803b6f3374d33a99feae5b08640c..936bae560fa1f730255bde8a94c9493c953efdfd 100644 (file)
@@ -233,7 +233,6 @@ static int calculate_capacity(enum apm_source source)
                empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
                now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
                avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
-               break;
        case SOURCE_VOLTAGE:
                full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
                empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
index 4b38eaa9f5f0c2b3f59b2429f763e1616dcdba0f..8fefe5a73558895f5bb0df35b865366ea02e8618 100644 (file)
@@ -271,14 +271,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
                if (ret)
                        return ret;
 
-               val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
+               val->intval = (int)be16_to_cpu(ec_word) * 9760L / 32;
                break;
        case POWER_SUPPLY_PROP_CURRENT_AVG:
                ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
                if (ret)
                        return ret;
 
-               val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120;
+               val->intval = (int)be16_to_cpu(ec_word) * 15625L / 120;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
                ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1);
@@ -299,7 +299,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
                if (ret)
                        return ret;
 
-               val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
+               val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
                break;
        case POWER_SUPPLY_PROP_TEMP_AMBIENT:
                ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
@@ -313,7 +313,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
                if (ret)
                        return ret;
 
-               val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15;
+               val->intval = (int)be16_to_cpu(ec_word) * 6250 / 15;
                break;
        case POWER_SUPPLY_PROP_SERIAL_NUMBER:
                ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
index 0b6893ab577e6de959ca59028eb2478135f5b4d2..2bc9ef37b46f3d879d984a82cf86e0b68c2a78f2 100644 (file)
@@ -235,7 +235,6 @@ static void __exit rtc_exit(void)
 {
        rtc_dev_exit();
        class_destroy(rtc_class);
-       idr_destroy(&rtc_idr);
 }
 
 subsys_initcall(rtc_init);
index 66c2d6a6d3600c9beec6306f4e91d45720ae9645..473e5f25e3dd6b4cc3e665978ba05a0014e4476e 100644 (file)
@@ -723,9 +723,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
                }
        }
 
-       cmos_rtc.dev = dev;
-       dev_set_drvdata(dev, &cmos_rtc);
-
        cmos_rtc.rtc = rtc_device_register(driver_name, dev,
                                &cmos_rtc_ops, THIS_MODULE);
        if (IS_ERR(cmos_rtc.rtc)) {
@@ -733,6 +730,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
                goto cleanup0;
        }
 
+       cmos_rtc.dev = dev;
+       dev_set_drvdata(dev, &cmos_rtc);
        rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
 
        spin_lock_irq(&rtc_lock);
index 44c4399ee7144fc2e8cffca50d8a96426b05a8fb..03ea530981d1e66328625fb26567cc40e35f9f23 100644 (file)
@@ -271,13 +271,12 @@ static int coh901331_resume(struct platform_device *pdev)
 {
        struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev);
 
-       if (device_may_wakeup(&pdev->dev)) {
+       if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(rtap->irq);
-       } else {
+       else
                clk_enable(rtap->clk);
                writel(rtap->irqmaskstore, rtap->virtbase + COH901331_IRQ_MASK);
                clk_disable(rtap->clk);
-       }
        return 0;
 }
 #else
index 861d91d012e053a4c4907015a93e67ea436318b3..eb99ee4fa0f5778a39e4faddd892766b06d0f078 100644 (file)
@@ -775,7 +775,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
 
 read_rtc:
        /* read RTC registers */
-       tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf);
+       tmp = ds1307->read_block_data(ds1307->client, 0, 8, buf);
        if (tmp != 8) {
                pr_debug("read error %d\n", tmp);
                err = -EIO;
@@ -860,7 +860,7 @@ read_rtc:
                if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
                        tmp += 12;
                i2c_smbus_write_byte_data(client,
-                               ds1307->offset + DS1307_REG_HOUR,
+                               DS1307_REG_HOUR,
                                bin2bcd(tmp));
        }
 
index 43bfffe1ec2b381fe8703bac1247f31c43759991..e0d7b9991505564c412e4771f8061154c5adc449 100644 (file)
@@ -456,6 +456,8 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
        pr_debug("s3c2410_rtc: RTCCON=%02x\n",
                 readb(s3c_rtc_base + S3C2410_RTCCON));
 
+       s3c_rtc_setfreq(&pdev->dev, 1);
+
        device_init_wakeup(&pdev->dev, 1);
 
        /* register RTC and exit */
@@ -472,9 +474,6 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
        rtc->max_user_freq = 128;
 
        platform_set_drvdata(pdev, rtc);
-
-       s3c_rtc_setfreq(&pdev->dev, 1);
-
        return 0;
 
  err_nortc:
index 126f240715a426ecdc115f3fc514173c1442d5cb..138124fcfcade55165aca7ee1088cad8d7ec74d7 100644 (file)
@@ -618,7 +618,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
        old_regs = set_irq_regs(regs);
        s390_idle_check();
        irq_enter();
-       __get_cpu_var(s390_idle).nohz_delay = 1;
        if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
                /* Serve timer interrupts first. */
                clock_comparator_work();
index a5b8e7b927019e66bdcad4cb4335a18fd3cb31a3..0391d759dfdbd5fb1676f68920f67cec6d8ef37a 100644 (file)
@@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                                /* Does this really need to be GFP_DMA? */
                                p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
                                if(!p) {
-                                       dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+                                       kfree (usg);
+                                       dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
                                          usg->sg[i].count,i,usg->count));
-                                       kfree(usg);
                                        rcode = -ENOMEM;
                                        goto cleanup;
                                }
index 3e89f8e06cbcb997c6f7cb9534dd715beddd1fb0..63b521d615f2bd992ffb030cbb2f04f2b817e18e 100644 (file)
@@ -3171,16 +3171,13 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
                                tinfo->curr.transport_version = 2;
                                tinfo->goal.transport_version = 2;
                                tinfo->goal.ppr_options = 0;
-                               if (scb != NULL) {
-                                       /*
-                                        * Remove any SCBs in the waiting
-                                        * for selection queue that may
-                                        * also be for this target so that
-                                        * command ordering is preserved.
-                                        */
-                                       ahd_freeze_devq(ahd, scb);
-                                       ahd_qinfifo_requeue_tail(ahd, scb);
-                               }
+                               /*
+                                * Remove any SCBs in the waiting for selection
+                                * queue that may also be for this target so
+                                * that command ordering is preserved.
+                                */
+                               ahd_freeze_devq(ahd, scb);
+                               ahd_qinfifo_requeue_tail(ahd, scb);
                                printerror = 0;
                        }
                } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
@@ -3197,16 +3194,13 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
                                      MSG_EXT_WDTR_BUS_8_BIT,
                                      AHD_TRANS_CUR|AHD_TRANS_GOAL,
                                      /*paused*/TRUE);
-                       if (scb != NULL) {
-                               /*
-                                * Remove any SCBs in the waiting for
-                                * selection queue that may also be for
-                                * this target so that command ordering
-                                * is preserved.
-                                */
-                               ahd_freeze_devq(ahd, scb);
-                               ahd_qinfifo_requeue_tail(ahd, scb);
-                       }
+                       /*
+                        * Remove any SCBs in the waiting for selection
+                        * queue that may also be for this target so that
+                        * command ordering is preserved.
+                        */
+                       ahd_freeze_devq(ahd, scb);
+                       ahd_qinfifo_requeue_tail(ahd, scb);
                        printerror = 0;
                } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
                        && ppr_busfree == 0) {
@@ -3223,16 +3217,13 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
                                        /*ppr_options*/0,
                                        AHD_TRANS_CUR|AHD_TRANS_GOAL,
                                        /*paused*/TRUE);
-                       if (scb != NULL) {
-                               /*
-                                * Remove any SCBs in the waiting for
-                                * selection queue that may also be for
-                                * this target so that command ordering
-                                * is preserved.
-                                */
-                               ahd_freeze_devq(ahd, scb);
-                               ahd_qinfifo_requeue_tail(ahd, scb);
-                       }
+                       /*
+                        * Remove any SCBs in the waiting for selection
+                        * queue that may also be for this target so that
+                        * command ordering is preserved.
+                        */
+                       ahd_freeze_devq(ahd, scb);
+                       ahd_qinfifo_requeue_tail(ahd, scb);
                        printerror = 0;
                } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
                        && ahd_sent_msg(ahd, AHDMSG_1B,
@@ -3260,7 +3251,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
         * the message phases.  We check it last in case we
         * had to send some other message that caused a busfree.
         */
-       if (scb != NULL && printerror != 0
+       if (printerror != 0
         && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
         && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
 
index 9e71ac611146ab86432664af1c2a40c7d2070ea0..477542602284661dbcb7d0d65d65223d612d3ec4 100644 (file)
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
                if (info->scsi.phase == PHASE_IDLE)
                        fas216_kick(info);
 
-               mod_timer(&info->eh_timer, jiffies + 30 * HZ);
+               mod_timer(&info->eh_timer, 30 * HZ);
                spin_unlock_irqrestore(&info->host_lock, flags);
 
                /*
index bb96d7496215a9908029684d57fee6db730d36ea..9e8fce0f0c1b3f1022b5658a2ad503fcc810abf6 100644 (file)
@@ -4174,14 +4174,6 @@ static int ioc_general(void __user *arg, char *cmnd)
     ha = gdth_find_ha(gen.ionode);
     if (!ha)
         return -EFAULT;
-
-    if (gen.data_len > INT_MAX)
-        return -EINVAL;
-    if (gen.sense_len > INT_MAX)
-        return -EINVAL;
-    if (gen.data_len + gen.sense_len > INT_MAX)
-        return -EINVAL;
-
     if (gen.data_len + gen.sense_len != 0) {
         if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
                                      FALSE, &paddr)))
index 2d66fac56180e930bda9fb66125ddb93da062713..bb2c696c006acc6f59d82b0aeb1d06980e5dc45c 100644 (file)
@@ -1969,7 +1969,7 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
        DECLARE_COMPLETION_ONSTACK(comp);
        int wait;
        unsigned long flags;
-       signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+       signed long timeout = init_timeout * HZ;
 
        ENTER;
        do {
@@ -2720,7 +2720,6 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
        if (crq->valid & 0x80) {
                if (++async_crq->cur == async_crq->size)
                        async_crq->cur = 0;
-               rmb();
        } else
                crq = NULL;
 
@@ -2743,7 +2742,6 @@ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
        if (crq->valid & 0x80) {
                if (++queue->cur == queue->size)
                        queue->cur = 0;
-               rmb();
        } else
                crq = NULL;
 
@@ -2792,14 +2790,12 @@ static void ibmvfc_tasklet(void *data)
                while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
                        ibmvfc_handle_async(async, vhost);
                        async->valid = 0;
-                       wmb();
                }
 
                /* Pull all the valid messages off the CRQ */
                while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
                        ibmvfc_handle_crq(crq, vhost);
                        crq->valid = 0;
-                       wmb();
                }
 
                vio_enable_interrupts(vdev);
@@ -2807,12 +2803,10 @@ static void ibmvfc_tasklet(void *data)
                        vio_disable_interrupts(vdev);
                        ibmvfc_handle_async(async, vhost);
                        async->valid = 0;
-                       wmb();
                } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
                        vio_disable_interrupts(vdev);
                        ibmvfc_handle_crq(crq, vhost);
                        crq->valid = 0;
-                       wmb();
                } else
                        done = 1;
        }
index ef8e9f878973776de7992607c97f92d425c979b1..007fa1c9ef14eedbab72484577bb5683fe9dda19 100644 (file)
@@ -38,7 +38,6 @@
 #define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT       \
                (IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
 #define IBMVFC_INIT_TIMEOUT            120
-#define IBMVFC_ABORT_WAIT_TIMEOUT      40
 #define IBMVFC_MAX_REQUESTS_DEFAULT    100
 
 #define IBMVFC_DEBUG                   0
index aab4a39cf84df83112d767d5784f72b79f67734b..f1a4246f890c044a9cb548366cd7a79865a42f4a 100644 (file)
@@ -384,12 +384,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 
        WARN_ON(hdrlength >= 256);
        hdr->hlength = hdrlength & 0xFF;
-       hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
 
        if (session->tt->init_task && session->tt->init_task(task))
                return -EIO;
 
        task->state = ISCSI_TASK_RUNNING;
+       hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
        session->cmdsn++;
 
        conn->scsicmd_pdus_cnt++;
@@ -2823,15 +2823,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
                session->state = ISCSI_STATE_TERMINATE;
        else if (conn->stop_stage != STOP_CONN_RECOVER)
                session->state = ISCSI_STATE_IN_RECOVERY;
-
-       old_stop_stage = conn->stop_stage;
-       conn->stop_stage = flag;
        spin_unlock_bh(&session->lock);
 
        del_timer_sync(&conn->transport_timer);
        iscsi_suspend_tx(conn);
 
        spin_lock_bh(&session->lock);
+       old_stop_stage = conn->stop_stage;
+       conn->stop_stage = flag;
        conn->c_stage = ISCSI_CONN_STOPPED;
        spin_unlock_bh(&session->lock);
 
index 0ee989fbb0fc34959ae5cf2e017c4dfc4e1d6944..e15501170698a19957d22f92c6fbc2e3e89ac0c6 100644 (file)
@@ -346,7 +346,6 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
 static struct ata_port_operations sas_sata_ops = {
        .phy_reset              = sas_ata_phy_reset,
        .post_internal_cmd      = sas_ata_post_internal,
-       .qc_defer               = ata_std_qc_defer,
        .qc_prep                = ata_noop_qc_prep,
        .qc_issue               = sas_ata_qc_issue,
        .qc_fill_rtf            = sas_ata_qc_fill_rtf,
@@ -395,15 +394,11 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
 void sas_ata_task_abort(struct sas_task *task)
 {
        struct ata_queued_cmd *qc = task->uldd_task;
-       struct request_queue *q = qc->scsicmd->device->request_queue;
        struct completion *waiting;
-       unsigned long flags;
 
        /* Bounce SCSI-initiated commands to the SCSI EH */
        if (qc->scsicmd) {
-               spin_lock_irqsave(q->queue_lock, flags);
                blk_abort_request(qc->scsicmd->request);
-               spin_unlock_irqrestore(q->queue_lock, flags);
                scsi_schedule_eh(qc->scsicmd->device->host);
                return;
        }
index 39fb9aa93fe51175e4a18ae3c4b47cc4e3d2d3ce..1c558d3bce18c4817623078944e86557eb3adb9f 100644 (file)
@@ -1025,8 +1025,6 @@ int __sas_task_abort(struct sas_task *task)
 void sas_task_abort(struct sas_task *task)
 {
        struct scsi_cmnd *sc = task->uldd_task;
-       struct request_queue *q = sc->device->request_queue;
-       unsigned long flags;
 
        /* Escape for libsas internal commands */
        if (!sc) {
@@ -1041,9 +1039,7 @@ void sas_task_abort(struct sas_task *task)
                return;
        }
 
-       spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(sc->request);
-       spin_unlock_irqrestore(q->queue_lock, flags);
        scsi_schedule_eh(sc->device->host);
 }
 
index 202fa0f4b805c2987c76d0df2b72387bfc71097f..518712cc7253c2d46ad6a9450e0c4774d0d52e79 100644 (file)
@@ -3282,7 +3282,6 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
            compat_alloc_user_space(sizeof(struct megasas_iocpacket));
        int i;
        int error = 0;
-       compat_uptr_t ptr;
 
        if (clear_user(ioc, sizeof(*ioc)))
                return -EFAULT;
@@ -3295,22 +3294,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
            copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
                return -EFAULT;
 
-       /*
-        * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
-        * sense_len is not null, so prepare the 64bit value under
-        * the same condition.
-        */
-       if (ioc->sense_len) {
-               void __user **sense_ioc_ptr =
-                       (void __user **)(ioc->frame.raw + ioc->sense_off);
-               compat_uptr_t *sense_cioc_ptr =
-                       (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
-               if (get_user(ptr, sense_cioc_ptr) ||
-                   put_user(compat_ptr(ptr), sense_ioc_ptr))
-                       return -EFAULT;
-       }
-
        for (i = 0; i < MAX_IOCTL_SGE; i++) {
+               compat_uptr_t ptr;
+
                if (get_user(ptr, &cioc->sgl[i].iov_base) ||
                    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
                    copy_in_user(&ioc->sgl[i].iov_len,
index f10bf70c58df6eed8f30c9adb8f1a2d790a54a7e..1743640070ca402979021a2ec0029bcffae21f62 100644 (file)
@@ -5721,8 +5721,6 @@ _scsih_remove(struct pci_dev *pdev)
        struct _sas_port *mpt2sas_port;
        struct _sas_device *sas_device;
        struct _sas_node *expander_sibling;
-       struct _raid_device *raid_device, *next;
-       struct MPT2SAS_TARGET *sas_target_priv_data;
        struct workqueue_struct *wq;
        unsigned long flags;
 
@@ -5736,21 +5734,6 @@ _scsih_remove(struct pci_dev *pdev)
        if (wq)
                destroy_workqueue(wq);
 
-       /* release all the volumes */
-       list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
-           list) {
-               if (raid_device->starget) {
-                       sas_target_priv_data =
-                           raid_device->starget->hostdata;
-                       sas_target_priv_data->deleted = 1;
-                       scsi_remove_target(&raid_device->starget->dev);
-               }
-               printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
-                   "(0x%016llx)\n", ioc->name,  raid_device->handle,
-                   (unsigned long long) raid_device->wwid);
-               _scsih_raid_device_remove(ioc, raid_device);
-       }
-
        /* free ports attached to the sas_host */
  retry_again:
        list_for_each_entry(mpt2sas_port,
index cae6b2cf492fb099cd2b7b6a2257dd8a768c2105..c790d45876c47591d04f8906b79aaa134cbccb2d 100644 (file)
@@ -657,7 +657,6 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
        { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
-       { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
 
        { }     /* terminate list */
 };
index 49ac4148493b9746a31cfdc50dcbe8a6d705f618..8371d917a9a2408242a2439c9c69f298cc012964 100644 (file)
@@ -1640,10 +1640,8 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha)
        uint16_t mb[MAILBOX_REGISTER_COUNT], i;
        int err;
 
-       spin_unlock_irq(ha->host->host_lock);
        err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
                               &ha->pdev->dev);
-       spin_lock_irq(ha->host->host_lock);
        if (err) {
                printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
                       ql1280_board_tbl[ha->devnum].fwname, err);
@@ -1701,10 +1699,8 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
                return -ENOMEM;
 #endif
 
-       spin_unlock_irq(ha->host->host_lock);
        err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
                               &ha->pdev->dev);
-       spin_lock_irq(ha->host->host_lock);
        if (err) {
                printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
                       ql1280_board_tbl[ha->devnum].fwname, err);
index 65ef03ca56873ac001f22a2fb4c5b1296c65a04b..f3d1d1afa95b268a6ac47626e004f522b7223918 100644 (file)
@@ -453,5 +453,6 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
 
 #endif /* _QLA_GBL_H */
index f3e5e30dd5ffde3b12735fed344a6a77209e548a..b20a7169aac28990a2debb7670716668b9815d37 100644 (file)
@@ -1347,22 +1347,16 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 
        sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
        if (IS_FWI2_CAPABLE(ha)) {
-               if (scsi_status & SS_SENSE_LEN_VALID)
-                       sense_len = le32_to_cpu(sts24->sense_len);
-               if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
-                       rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
-               if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
-                       resid_len = le32_to_cpu(sts24->rsp_residual_count);
-               if (comp_status == CS_DATA_UNDERRUN)
-                       fw_resid_len = le32_to_cpu(sts24->residual_len);
+               sense_len = le32_to_cpu(sts24->sense_len);
+               rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
+               resid_len = le32_to_cpu(sts24->rsp_residual_count);
+               fw_resid_len = le32_to_cpu(sts24->residual_len);
                rsp_info = sts24->data;
                sense_data = sts24->data;
                host_to_fcp_swap(sts24->data, sizeof(sts24->data));
        } else {
-               if (scsi_status & SS_SENSE_LEN_VALID)
-                       sense_len = le16_to_cpu(sts->req_sense_length);
-               if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
-                       rsp_info_len = le16_to_cpu(sts->rsp_info_len);
+               sense_len = le16_to_cpu(sts->req_sense_length);
+               rsp_info_len = le16_to_cpu(sts->rsp_info_len);
                resid_len = le32_to_cpu(sts->residual_length);
                rsp_info = sts->rsp_info;
                sense_data = sts->req_sense_data;
@@ -1449,62 +1443,38 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                break;
 
        case CS_DATA_UNDERRUN:
-               DEBUG2(printk(KERN_INFO
-                   "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
-                   "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
-                   vha->host_no, cp->device->id, cp->device->lun, comp_status,
-                   scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
-                   cp->underflow));
-
+               resid = resid_len;
                /* Use F/W calculated residual length. */
-               resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
-               scsi_set_resid(cp, resid);
-               if (scsi_status & SS_RESIDUAL_UNDER) {
-                       if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
-                               DEBUG2(printk(
-                                   "scsi(%ld:%d:%d:%d) Dropped frame(s) "
-                                   "detected (%x of %x bytes)...residual "
-                                   "length mismatch...retrying command.\n",
-                                   vha->host_no, cp->device->channel,
-                                   cp->device->id, cp->device->lun, resid,
-                                   scsi_bufflen(cp)));
-
-                               cp->result = DID_ERROR << 16 | lscsi_status;
-                               break;
+               if (IS_FWI2_CAPABLE(ha)) {
+                       if (!(scsi_status & SS_RESIDUAL_UNDER)) {
+                               lscsi_status = 0;
+                       } else if (resid != fw_resid_len) {
+                               scsi_status &= ~SS_RESIDUAL_UNDER;
+                               lscsi_status = 0;
                        }
+                       resid = fw_resid_len;
+               }
 
-                       if (!lscsi_status &&
-                           ((unsigned)(scsi_bufflen(cp) - resid) <
-                           cp->underflow)) {
-                               qla_printk(KERN_INFO, ha,
-                                   "scsi(%ld:%d:%d:%d): Mid-layer underflow "
-                                   "detected (%x of %x bytes)...returning "
-                                   "error status.\n", vha->host_no,
-                                   cp->device->channel, cp->device->id,
-                                   cp->device->lun, resid, scsi_bufflen(cp));
-
-                               cp->result = DID_ERROR << 16;
-                               break;
-                       }
-               } else if (!lscsi_status) {
-                       DEBUG2(printk(
-                           "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
-                           "(%x of %x bytes)...firmware reported underrun..."
-                           "retrying command.\n", vha->host_no,
-                           cp->device->channel, cp->device->id,
-                           cp->device->lun, resid, scsi_bufflen(cp)));
+               if (scsi_status & SS_RESIDUAL_UNDER) {
+                       scsi_set_resid(cp, resid);
+               } else {
+                       DEBUG2(printk(KERN_INFO
+                           "scsi(%ld:%d:%d) UNDERRUN status detected "
+                           "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
+                           "os_underflow=0x%x\n", vha->host_no,
+                           cp->device->id, cp->device->lun, comp_status,
+                           scsi_status, resid_len, resid, cp->cmnd[0],
+                           cp->underflow));
 
-                       cp->result = DID_ERROR << 16;
-                       break;
                }
 
-               cp->result = DID_OK << 16 | lscsi_status;
-
                /*
                 * Check to see if SCSI Status is non zero. If so report SCSI
                 * Status.
                 */
                if (lscsi_status != 0) {
+                       cp->result = DID_OK << 16 | lscsi_status;
+
                        if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
                                DEBUG2(printk(KERN_INFO
                                    "scsi(%ld): QUEUE FULL status detected "
@@ -1531,6 +1501,42 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                break;
 
                        qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
+               } else {
+                       /*
+                        * If RISC reports underrun and target does not report
+                        * it then we must have a lost frame, so tell upper
+                        * layer to retry it by reporting an error.
+                        */
+                       if (!(scsi_status & SS_RESIDUAL_UNDER)) {
+                               DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
+                                             "frame(s) detected (%x of %x bytes)..."
+                                             "retrying command.\n",
+                                       vha->host_no, cp->device->channel,
+                                       cp->device->id, cp->device->lun, resid,
+                                       scsi_bufflen(cp)));
+
+                               scsi_set_resid(cp, resid);
+                               cp->result = DID_ERROR << 16;
+                               break;
+                       }
+
+                       /* Handle mid-layer underflow */
+                       if ((unsigned)(scsi_bufflen(cp) - resid) <
+                           cp->underflow) {
+                               qla_printk(KERN_INFO, ha,
+                                          "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+                                          "detected (%x of %x bytes)...returning "
+                                          "error status.\n", vha->host_no,
+                                          cp->device->channel, cp->device->id,
+                                          cp->device->lun, resid,
+                                          scsi_bufflen(cp));
+
+                               cp->result = DID_ERROR << 16;
+                               break;
+                       }
+
+                       /* Everybody online, looking good... */
+                       cp->result = DID_OK << 16;
                }
                break;
 
@@ -2012,7 +2018,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 
        spin_lock_irq(&ha->hardware_lock);
 
-       vha = pci_get_drvdata(ha->pdev);
+       vha = qla25xx_get_host(rsp);
        qla24xx_process_response_queue(vha, rsp);
        if (!ha->mqenable) {
                WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2240,28 +2246,30 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 
        /* If possible, enable MSI-X. */
        if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
-               !IS_QLA8432(ha) && !IS_QLA8001(ha))
-               goto skip_msi;
-
-       if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
-               (ha->pdev->subsystem_device == 0x7040 ||
-               ha->pdev->subsystem_device == 0x7041 ||
-               ha->pdev->subsystem_device == 0x1705)) {
-               DEBUG2(qla_printk(KERN_WARNING, ha,
-                       "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
-                       ha->pdev->subsystem_vendor,
-                       ha->pdev->subsystem_device));
-               goto skip_msi;
-       }
+           !IS_QLA8432(ha) && !IS_QLA8001(ha))
+               goto skip_msix;
 
        if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
                !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
                DEBUG2(qla_printk(KERN_WARNING, ha,
                "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
                        ha->pdev->revision, ha->fw_attributes));
+
                goto skip_msix;
        }
 
+       if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+           (ha->pdev->subsystem_device == 0x7040 ||
+               ha->pdev->subsystem_device == 0x7041 ||
+               ha->pdev->subsystem_device == 0x1705)) {
+               DEBUG2(qla_printk(KERN_WARNING, ha,
+                   "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
+                   ha->pdev->subsystem_vendor,
+                   ha->pdev->subsystem_device));
+
+               goto skip_msi;
+       }
+
        ret = qla24xx_enable_msix(ha, rsp);
        if (!ret) {
                DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2349,3 +2357,30 @@ int qla25xx_request_irq(struct rsp_que *rsp)
        msix->rsp = rsp;
        return ret;
 }
+
+struct scsi_qla_host *
+qla25xx_get_host(struct rsp_que *rsp)
+{
+       srb_t *sp;
+       struct qla_hw_data *ha = rsp->hw;
+       struct scsi_qla_host *vha = NULL;
+       struct sts_entry_24xx *pkt;
+       struct req_que *req;
+       uint16_t que;
+       uint32_t handle;
+
+       pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
+       que = MSW(pkt->handle);
+       handle = (uint32_t) LSW(pkt->handle);
+       req = ha->req_q_map[que];
+       if (handle < MAX_OUTSTANDING_COMMANDS) {
+               sp = req->outstanding_cmds[handle];
+               if (sp)
+                       return  sp->fcport->vha;
+               else
+                       goto base_que;
+       }
+base_que:
+       vha = pci_get_drvdata(ha->pdev);
+       return vha;
+}
index 4a69cc8c05e919b9b9780b214ad3148a51c14b30..e07b3617f019397188cd24674fdcfabe09e49eb5 100644 (file)
@@ -638,15 +638,11 @@ failed:
 
 static void qla_do_work(struct work_struct *work)
 {
-       unsigned long flags;
        struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
        struct scsi_qla_host *vha;
-       struct qla_hw_data *ha = rsp->hw;
 
-       spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
-       vha = pci_get_drvdata(ha->pdev);
+       vha = qla25xx_get_host(rsp);
        qla24xx_process_response_queue(vha, rsp);
-       spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
 }
 
 /* create response queue */
index bc3e3636a3b8180cf1c04bd5ea44e37de1a92543..c4103bef41b59cc8988be725fa74822bba9e5d5d 100644 (file)
@@ -914,8 +914,7 @@ static int resp_start_stop(struct scsi_cmnd * scp,
 static sector_t get_sdebug_capacity(void)
 {
        if (scsi_debug_virtual_gb > 0)
-               return (sector_t)scsi_debug_virtual_gb *
-                       (1073741824 / scsi_debug_sector_size);
+               return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
        else
                return sdebug_store_sectors;
 }
index 573921d00070d79f070bc9f3866f300839e4af98..1b0060b791e8dc6fe0b1dd4ace0a1e3f3ea96c7e 100644 (file)
@@ -301,20 +301,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
                if (scmd->device->allow_restart &&
                    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
                        return FAILED;
-
-               if (blk_barrier_rq(scmd->request))
-                       /*
-                        * barrier requests should always retry on UA
-                        * otherwise block will get a spurious error
-                        */
-                       return NEEDS_RETRY;
-               else
-                       /*
-                        * for normal (non barrier) commands, pass the
-                        * UA upwards for a determination in the
-                        * completion functions
-                        */
-                       return SUCCESS;
+               return SUCCESS;
 
                /* these three are not supported */
        case COPY_ABORTED:
index d9564fb04f62cd5c390aed97547adf4f49b6fc24..b98f763931c5fadb9c0b438c7099786b6ac8d41b 100644 (file)
@@ -308,9 +308,6 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
                case SG_SCSI_RESET_DEVICE:
                        val = SCSI_TRY_RESET_DEVICE;
                        break;
-               case SG_SCSI_RESET_TARGET:
-                       val = SCSI_TRY_RESET_TARGET;
-                       break;
                case SG_SCSI_RESET_BUS:
                        val = SCSI_TRY_RESET_BUS;
                        break;
index b87fc30fad6ba17f1f8158acdcb436e6e589afb8..bc9a88145a71e9fe1813161cf6a9895bf99c8a61 100644 (file)
@@ -773,14 +773,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
         * we already took a copy of the original into rq->errors which
         * is what gets returned to the user
         */
-       if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
-               /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
-                * print since caller wants ATA registers. Only occurs on
-                * SCSI ATA PASS_THROUGH commands when CK_COND=1
-                */
-               if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
-                       ;
-               else if (!(req->cmd_flags & REQ_QUIET))
+       if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
+               if (!(req->cmd_flags & REQ_QUIET))
                        scsi_print_sense("", cmd);
                result = 0;
                /* BLOCK_PC may have set error */
@@ -2432,8 +2426,7 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
                sdev->sdev_state = SDEV_RUNNING;
        else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
                sdev->sdev_state = SDEV_CREATED;
-       else if (sdev->sdev_state != SDEV_CANCEL &&
-                sdev->sdev_state != SDEV_OFFLINE)
+       else
                return -EINVAL;
 
        spin_lock_irqsave(q->queue_lock, flags);
index ad136c2be5016d7bdd09387612c835d4edd10256..392d8db33905cbe31f1e2f96644552d9b692eb27 100644 (file)
@@ -954,11 +954,10 @@ static void __scsi_remove_target(struct scsi_target *starget)
        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->channel != starget->channel ||
                    sdev->id != starget->id ||
-                   scsi_device_get(sdev))
+                   sdev->sdev_state == SDEV_DEL)
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);
                scsi_remove_device(sdev);
-               scsi_device_put(sdev);
                spin_lock_irqsave(shost->host_lock, flags);
                goto restart;
        }
index db02e31bae391cb173f779f72055b368403ed25f..bf52decfdef48d41e0366ddcd892d4f0f9e05168 100644 (file)
@@ -1215,15 +1215,6 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
 {
        struct fc_vport *vport = transport_class_to_vport(dev);
        struct Scsi_Host *shost = vport_to_shost(vport);
-       unsigned long flags;
-
-       spin_lock_irqsave(shost->host_lock, flags);
-       if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
-               spin_unlock_irqrestore(shost->host_lock, flags);
-               return -EBUSY;
-       }
-       vport->flags |= FC_VPORT_DELETING;
-       spin_unlock_irqrestore(shost->host_lock, flags);
 
        fc_queue_work(shost, &vport->vport_delete_work);
        return count;
@@ -1813,9 +1804,6 @@ store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
        list_for_each_entry(vport, &fc_host->vports, peers) {
                if ((vport->channel == 0) &&
                    (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
-                       if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
-                               break;
-                       vport->flags |= FC_VPORT_DELETING;
                        match = 1;
                        break;
                }
@@ -3340,6 +3328,18 @@ fc_vport_terminate(struct fc_vport *vport)
        unsigned long flags;
        int stat;
 
+       spin_lock_irqsave(shost->host_lock, flags);
+       if (vport->flags & FC_VPORT_CREATING) {
+               spin_unlock_irqrestore(shost->host_lock, flags);
+               return -EBUSY;
+       }
+       if (vport->flags & (FC_VPORT_DEL)) {
+               spin_unlock_irqrestore(shost->host_lock, flags);
+               return -EALREADY;
+       }
+       vport->flags |= FC_VPORT_DELETING;
+       spin_unlock_irqrestore(shost->host_lock, flags);
+
        if (i->f->vport_delete)
                stat = i->f->vport_delete(vport);
        else
@@ -3796,9 +3796,8 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
                return;
 
        while (!blk_queue_plugged(q)) {
-               if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
-                   !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
-                       break;
+               if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
+                               break;
 
                req = blk_fetch_request(q);
                if (!req)
index 81a9d25ecaba4eac3dcd1e20cac06cf3e22d925d..9093c7261f330085bf4ba843b18b8e53ad4270cd 100644 (file)
@@ -971,7 +971,6 @@ static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->timeout = SD_TIMEOUT;
-       rq->retries = SD_MAX_RETRIES;
        rq->cmd[0] = SYNCHRONIZE_CACHE;
        rq->cmd_len = 10;
 }
@@ -2049,10 +2048,11 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
        index = sdkp->index;
        dev = &sdp->sdev_gendev;
 
-       gd->major = sd_major((index & 0xf0) >> 4);
-       gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
-       gd->minors = SD_MINORS;
-
+       if (index < SD_MAX_DISKS) {
+               gd->major = sd_major((index & 0xf0) >> 4);
+               gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+               gd->minors = SD_MINORS;
+       }
        gd->fops = &sd_fops;
        gd->private_data = &sdkp->driver;
        gd->queue = sdkp->device->request_queue;
@@ -2141,12 +2141,6 @@ static int sd_probe(struct device *dev)
        if (error)
                goto out_put;
 
-       if (index >= SD_MAX_DISKS) {
-               error = -ENODEV;
-               sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
-               goto out_free_index;
-       }
-
        error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
        if (error)
                goto out_free_index;
index 3c8a0248ea45f69af399b56f5a91c78d115e6c7c..55b034b72708e5f8b5cc873082e3a44003325ba7 100644 (file)
@@ -591,6 +591,8 @@ static int ses_intf_add(struct device *cdev,
                ses_dev->page10_len = len;
                buf = NULL;
        }
+       kfree(hdr_buf);
+
        scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
        if (!scomp)
                goto err_free;
@@ -602,8 +604,6 @@ static int ses_intf_add(struct device *cdev,
                goto err_free;
        }
 
-       kfree(hdr_buf);
-
        edev->scratch = ses_dev;
        for (i = 0; i < components; i++)
                edev->component[i].scratch = scomp + i;
index 48ead154bd02778f46345be4d54159ab32a693ff..deac67e35ce92e950c18061b279219c25454df25 100644 (file)
@@ -348,8 +348,6 @@ static const struct pnp_device_id pnp_dev_table[] = {
        {       "FUJ02E6",              0       },
        /* Fujitsu Wacom 2FGT Tablet PC device */
        {       "FUJ02E7",              0       },
-       /* Fujitsu Wacom 1FGT Tablet PC device */
-       {       "FUJ02E9",              0       },
        /*
         * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
         * disguise)
index 7feb902c32cb99b0570e76e9d8b8a9e99f52d162..300cea768d746939762afee7f42a8ad2cf5d06a1 100644 (file)
@@ -930,83 +930,6 @@ static void cpm_uart_config_port(struct uart_port *port, int flags)
        }
 }
 
-#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE)
-/*
- * Write a string to the serial port
- * Note that this is called with interrupts already disabled
- */
-static void cpm_uart_early_write(struct uart_cpm_port *pinfo,
-               const char *string, u_int count)
-{
-       unsigned int i;
-       cbd_t __iomem *bdp, *bdbase;
-       unsigned char *cpm_outp_addr;
-
-       /* Get the address of the host memory buffer.
-        */
-       bdp = pinfo->tx_cur;
-       bdbase = pinfo->tx_bd_base;
-
-       /*
-        * Now, do each character.  This is not as bad as it looks
-        * since this is a holding FIFO and not a transmitting FIFO.
-        * We could add the complexity of filling the entire transmit
-        * buffer, but we would just wait longer between accesses......
-        */
-       for (i = 0; i < count; i++, string++) {
-               /* Wait for transmitter fifo to empty.
-                * Ready indicates output is ready, and xmt is doing
-                * that, not that it is ready for us to send.
-                */
-               while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
-                       ;
-
-               /* Send the character out.
-                * If the buffer address is in the CPM DPRAM, don't
-                * convert it.
-                */
-               cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
-                                       pinfo);
-               *cpm_outp_addr = *string;
-
-               out_be16(&bdp->cbd_datlen, 1);
-               setbits16(&bdp->cbd_sc, BD_SC_READY);
-
-               if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
-                       bdp = bdbase;
-               else
-                       bdp++;
-
-               /* if a LF, also do CR... */
-               if (*string == 10) {
-                       while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
-                               ;
-
-                       cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
-                                               pinfo);
-                       *cpm_outp_addr = 13;
-
-                       out_be16(&bdp->cbd_datlen, 1);
-                       setbits16(&bdp->cbd_sc, BD_SC_READY);
-
-                       if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
-                               bdp = bdbase;
-                       else
-                               bdp++;
-               }
-       }
-
-       /*
-        * Finally, Wait for transmitter & holding register to empty
-        *  and restore the IER
-        */
-       while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
-               ;
-
-       pinfo->tx_cur = bdp;
-}
-#endif
-
 #ifdef CONFIG_CONSOLE_POLL
 /* Serial polling routines for writing and reading from the uart while
  * in an interrupt or debug context.
@@ -1076,7 +999,7 @@ static void cpm_put_poll_char(struct uart_port *port,
        static char ch[2];
 
        ch[0] = (char)c;
-       cpm_uart_early_write(pinfo, ch, 1);
+       cpm_uart_early_write(pinfo->port.line, ch, 1);
 }
 #endif /* CONFIG_CONSOLE_POLL */
 
@@ -1207,6 +1130,9 @@ static void cpm_uart_console_write(struct console *co, const char *s,
                                   u_int count)
 {
        struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
+       unsigned int i;
+       cbd_t __iomem *bdp, *bdbase;
+       unsigned char *cp;
        unsigned long flags;
        int nolock = oops_in_progress;
 
@@ -1216,7 +1142,66 @@ static void cpm_uart_console_write(struct console *co, const char *s,
                spin_lock_irqsave(&pinfo->port.lock, flags);
        }
 
-       cpm_uart_early_write(pinfo, s, count);
+       /* Get the address of the host memory buffer.
+        */
+       bdp = pinfo->tx_cur;
+       bdbase = pinfo->tx_bd_base;
+
+       /*
+        * Now, do each character.  This is not as bad as it looks
+        * since this is a holding FIFO and not a transmitting FIFO.
+        * We could add the complexity of filling the entire transmit
+        * buffer, but we would just wait longer between accesses......
+        */
+       for (i = 0; i < count; i++, s++) {
+               /* Wait for transmitter fifo to empty.
+                * Ready indicates output is ready, and xmt is doing
+                * that, not that it is ready for us to send.
+                */
+               while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+                       ;
+
+               /* Send the character out.
+                * If the buffer address is in the CPM DPRAM, don't
+                * convert it.
+                */
+               cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
+               *cp = *s;
+
+               out_be16(&bdp->cbd_datlen, 1);
+               setbits16(&bdp->cbd_sc, BD_SC_READY);
+
+               if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+                       bdp = bdbase;
+               else
+                       bdp++;
+
+               /* if a LF, also do CR... */
+               if (*s == 10) {
+                       while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+                               ;
+
+                       cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
+                       *cp = 13;
+
+                       out_be16(&bdp->cbd_datlen, 1);
+                       setbits16(&bdp->cbd_sc, BD_SC_READY);
+
+                       if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+                               bdp = bdbase;
+                       else
+                               bdp++;
+               }
+       }
+
+       /*
+        * Finally, Wait for transmitter & holding register to empty
+        *  and restore the IER
+        */
+       while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+               ;
+
+       pinfo->tx_cur = bdp;
 
        if (unlikely(nolock)) {
                local_irq_restore(flags);
index 2b550182a879db10044bc177649140bd52441033..18130f11238e6025b16682c9a00cca218727e370 100644 (file)
 #define  MX2_UCR3_RXDMUXSEL     (1<<2)  /* RXD Muxed Input Select, on mx2/mx3 */
 #define  UCR3_INVT      (1<<1)  /* Inverted Infrared transmission */
 #define  UCR3_BPEN      (1<<0)  /* Preset registers enable */
-#define  UCR4_CTSTL_SHF  10      /* CTS trigger level shift */
-#define  UCR4_CTSTL_MASK 0x3F    /* CTS trigger is 6 bits wide */
+#define  UCR4_CTSTL_32   (32<<10) /* CTS trigger level (32 chars) */
 #define  UCR4_INVR      (1<<9)  /* Inverted infrared reception */
 #define  UCR4_ENIRI     (1<<8)  /* Serial infrared interrupt enable */
 #define  UCR4_WKEN      (1<<7)  /* Wake interrupt enable */
@@ -591,9 +590,6 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
        return 0;
 }
 
-/* half the RX buffer size */
-#define CTSTL 16
-
 static int imx_startup(struct uart_port *port)
 {
        struct imx_port *sport = (struct imx_port *)port;
@@ -610,10 +606,6 @@ static int imx_startup(struct uart_port *port)
        if (USE_IRDA(sport))
                temp |= UCR4_IRSC;
 
-       /* set the trigger level for CTS */
-       temp &= ~(UCR4_CTSTL_MASK<<  UCR4_CTSTL_SHF);
-       temp |= CTSTL<<  UCR4_CTSTL_SHF;
-
        writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
 
        if (USE_IRDA(sport)) {
@@ -1287,7 +1279,7 @@ static int serial_imx_probe(struct platform_device *pdev)
                sport->use_irda = 1;
 #endif
 
-       if (pdata && pdata->init) {
+       if (pdata->init) {
                ret = pdata->init(pdev);
                if (ret)
                        goto clkput;
@@ -1300,7 +1292,7 @@ static int serial_imx_probe(struct platform_device *pdev)
 
        return 0;
 deinit:
-       if (pdata && pdata->exit)
+       if (pdata->exit)
                pdata->exit(pdev);
 clkput:
        clk_put(sport->clk);
@@ -1329,7 +1321,7 @@ static int serial_imx_remove(struct platform_device *pdev)
 
        clk_disable(sport->clk);
 
-       if (pdata && pdata->exit)
+       if (pdata->exit)
                pdata->exit(pdev);
 
        iounmap(sport->port.membase);
index 744d3f6e470987c773f72a733dbbc146fa358706..ef9c6a04ad8f01af0d402f9f0f03a08175c04524 100644 (file)
@@ -24,7 +24,6 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) },
-       { PCI_DEVICE(PCI_VENDOR_ID_BCM_GVC,  0x4318) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) },
index bbf1cb21a7d3509d00d1461a29759927ee157e1a..9681536163caa5bdf7a36fc1dcfff9f8da3a66ec 100644 (file)
@@ -233,9 +233,6 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
 {
        if (!cc->dev)
                return; /* We don't have a ChipCommon */
-       if (cc->dev->id.revision >= 11)
-               cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
-       ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
        ssb_pmu_init(cc);
        chipco_powercontrol_init(cc);
        ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
index 321d9ef17b965a252f6f385c80347238ed3d70ff..9e50896233aa969e5fa145bcf630dd0ba9055363 100644 (file)
@@ -167,7 +167,7 @@ err_pci:
 }
 
 /* Get the word-offset for a SSB_SPROM_XXX define. */
-#define SPOFF(offset)  (((offset) - SSB_SPROM_BASE1) / sizeof(u16))
+#define SPOFF(offset)  (((offset) - SSB_SPROM_BASE) / sizeof(u16))
 /* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */
 #define SPEX16(_outvar, _offset, _mask, _shift)        \
        out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift))
@@ -253,7 +253,7 @@ static int sprom_do_read(struct ssb_bus *bus, u16 *sprom)
        int i;
 
        for (i = 0; i < bus->sprom_size; i++)
-               sprom[i] = ioread16(bus->mmio + bus->sprom_offset + (i * 2));
+               sprom[i] = ioread16(bus->mmio + SSB_SPROM_BASE + (i * 2));
 
        return 0;
 }
@@ -284,7 +284,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
                        ssb_printk("75%%");
                else if (i % 2)
                        ssb_printk(".");
-               writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
+               writew(sprom[i], bus->mmio + SSB_SPROM_BASE + (i * 2));
                mmiowb();
                msleep(20);
        }
@@ -620,28 +620,6 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
        int err = -ENOMEM;
        u16 *buf;
 
-       if (!ssb_is_sprom_available(bus)) {
-               ssb_printk(KERN_ERR PFX "No SPROM available!\n");
-               return -ENODEV;
-       }
-       if (bus->chipco.dev) {  /* can be unavailible! */
-               /*
-                * get SPROM offset: SSB_SPROM_BASE1 except for
-                * chipcommon rev >= 31 or chip ID is 0x4312 and
-                * chipcommon status & 3 == 2
-                */
-               if (bus->chipco.dev->id.revision >= 31)
-                       bus->sprom_offset = SSB_SPROM_BASE31;
-               else if (bus->chip_id == 0x4312 &&
-                        (bus->chipco.status & 0x03) == 2)
-                       bus->sprom_offset = SSB_SPROM_BASE31;
-               else
-                       bus->sprom_offset = SSB_SPROM_BASE1;
-       } else {
-               bus->sprom_offset = SSB_SPROM_BASE1;
-       }
-       ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
-
        buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
        if (!buf)
                goto out;
index 5f7154d9d04e09fc0ef2c892bfac73cac03b0f53..eb708431cb964d36e57ab80b9a9d18b1e5f7f7cb 100644 (file)
@@ -179,18 +179,3 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void)
 {
        return fallback_sprom;
 }
-
-/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
-bool ssb_is_sprom_available(struct ssb_bus *bus)
-{
-       /* status register only exists on chipcomon rev >= 11 and we need check
-          for >= 31 only */
-       /* this routine differs from specs as we do not access SPROM directly
-          on PCMCIA */
-       if (bus->bustype == SSB_BUSTYPE_PCI &&
-           bus->chipco.dev &&  /* can be unavailible! */
-           bus->chipco.dev->id.revision >= 31)
-               return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
-
-       return true;
-}
index 4b415577350eaa21f75eeb91d382b62b7b68a03f..23e5f39e8d2d444017deda3c3d866b914d6f8344 100755 (executable)
@@ -81,6 +81,8 @@ source "drivers/staging/rtl8192su/Kconfig"
 
 source "drivers/staging/rtl8192e/Kconfig"
 
+source "drivers/staging/mimio/Kconfig"
+
 source "drivers/staging/frontier/Kconfig"
 
 source "drivers/staging/android/Kconfig"
index 2b630f9e31b0946dd56dfe388c9b1b0509859678..73c026ed8e4874f6fc96c67b74c4cde594398784 100755 (executable)
@@ -23,6 +23,7 @@ obj-$(CONFIG_ALTERA_PCIE_CHDMA)       += altpciechdma/
 obj-$(CONFIG_RTL8187SE)                += rtl8187se/
 obj-$(CONFIG_RTL8192SU)                += rtl8192su/
 obj-$(CONFIG_RTL8192E)         += rtl8192e/
+obj-$(CONFIG_INPUT_MIMIO)      += mimio/
 obj-$(CONFIG_TRANZPORT)                += frontier/
 obj-$(CONFIG_ANDROID)          += android/
 obj-$(CONFIG_STAGING_DREAM)    += dream/
index 8a05725cb1d05ed6966eb8ce465bd194fe332820..43c57b7688abe658223c49834c56f6fb0be6d180 100644 (file)
@@ -609,13 +609,13 @@ static ssize_t class_set_picture(struct device *device,
 
 #define ASUS_OLED_DEVICE_ATTR(_file)           dev_attr_asus_oled_##_file
 
-static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO,
                   get_enabled, set_enabled);
-static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture);
+static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture);
 
-static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO,
                   class_get_enabled, class_set_enabled);
-static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture);
+static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture);
 
 static int asus_oled_probe(struct usb_interface *interface,
                           const struct usb_device_id *id)
index 1d6834d271fea06d529bb50ec6f2045842a5eee5..d63c889ce5574a6017cb627389eaf26b81fe47ee 100644 (file)
@@ -16,7 +16,6 @@ config COMEDI_DEBUG
 config COMEDI_PCI_DRIVERS
        tristate "Comedi PCI drivers"
        depends on COMEDI && PCI
-       select COMEDI_8255
        default N
        ---help---
          Enable lots of comedi PCI drivers to be built
@@ -24,7 +23,6 @@ config COMEDI_PCI_DRIVERS
 config COMEDI_PCMCIA_DRIVERS
        tristate "Comedi PCMCIA drivers"
        depends on COMEDI && PCMCIA && PCCARD
-       select COMEDI_8255
        default N
        ---help---
          Enable lots of comedi PCMCIA and PCCARD drivers to be built
@@ -35,6 +33,3 @@ config COMEDI_USB_DRIVERS
        default N
        ---help---
          Enable lots of comedi USB drivers to be built
-
-config COMEDI_8255
-       tristate
index 33b1d526837668bee5c457ccc104cd38b7b1f4f6..df2854d543ccc3857cc6e0efc95bfe7994aaf1d5 100644 (file)
@@ -8,10 +8,8 @@ obj-$(CONFIG_COMEDI)                   += comedi_test.o
 obj-$(CONFIG_COMEDI)                   += comedi_parport.o
 obj-$(CONFIG_COMEDI)                   += pcm_common.o
 
-# Comedi 8255 module
-obj-$(CONFIG_COMEDI_8255)              += 8255.o
-
 # Comedi PCI drivers
+obj-$(CONFIG_COMEDI_PCI_DRIVERS)       += 8255.o
 obj-$(CONFIG_COMEDI_PCI_DRIVERS)       += acl7225b.o
 obj-$(CONFIG_COMEDI_PCI_DRIVERS)       += addi_apci_035.o
 obj-$(CONFIG_COMEDI_PCI_DRIVERS)       += addi_apci_1032.o
index 27829e77ca08f110ae6597d5d0871b233ce5eff6..9aef87fc81dcd3560cda172fef89471b87b14bec 100644 (file)
@@ -123,7 +123,7 @@ static const struct ni_board_struct ni_boards[] = {
         .adbits = 12,
         .ai_fifo_depth = 1024,
         .alwaysdither = 0,
-        .gainlkup = ai_gain_4,
+        .gainlkup = ai_gain_16,
         .ai_speed = 5000,
         .n_aochan = 2,
         .aobits = 12,
index 5c9c1bc3eb61260be5a24ea969be3a041de802fc..cca4e869f0ecf10868a17d7d7cf060f6b94b87df 100644 (file)
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v2.4"
+#define DRIVER_VERSION "v2.2"
 #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
 #define DRIVER_DESC "Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"
 /*
@@ -80,9 +80,6 @@ sampling rate. If you sample two channels you get 4kHz and so on.
  * 2.0:  PWM seems to be stable and is not interfering with the other functions
  * 2.1:  changed PWM API
  * 2.2:  added firmware kernel request to fix an udev problem
- * 2.3:  corrected a bug in bulk timeouts which were far too short
- * 2.4:  fixed a bug which causes the driver to hang when it ran out of data.
- *       Thanks to Jan-Matthias Braun and Ian to spot the bug and fix it.
  *
  */
 
@@ -104,8 +101,8 @@ sampling rate. If you sample two channels you get 4kHz and so on.
 
 #define BOARDNAME "usbdux"
 
-/* timeout for the USB-transfer in ms*/
-#define BULK_TIMEOUT 1000
+/* timeout for the USB-transfer */
+#define EZTIMEOUT 30
 
 /* constants for "firmware" upload and download */
 #define USBDUXSUB_FIRMWARE 0xA0
@@ -534,7 +531,6 @@ static void usbduxsub_ai_IsocIrq(struct urb *urb)
                }
        }
        /* tell comedi that data is there */
-       s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
        comedi_event(this_usbduxsub->comedidev, s);
 }
 
@@ -754,7 +750,7 @@ static int usbduxsub_start(struct usbduxsub *usbduxsub)
                                  /* Length */
                                  1,
                                  /* Timeout */
-                                 BULK_TIMEOUT);
+                                 EZTIMEOUT);
        if (errcode < 0) {
                dev_err(&usbduxsub->interface->dev,
                        "comedi_: control msg failed (start)\n");
@@ -784,7 +780,7 @@ static int usbduxsub_stop(struct usbduxsub *usbduxsub)
                                  /* Length */
                                  1,
                                  /* Timeout */
-                                 BULK_TIMEOUT);
+                                 EZTIMEOUT);
        if (errcode < 0) {
                dev_err(&usbduxsub->interface->dev,
                        "comedi_: control msg failed (stop)\n");
@@ -814,7 +810,7 @@ static int usbduxsub_upload(struct usbduxsub *usbduxsub,
                                  /* length */
                                  len,
                                  /* timeout */
-                                 BULK_TIMEOUT);
+                                 EZTIMEOUT);
        dev_dbg(&usbduxsub->interface->dev, "comedi_: result=%d\n", errcode);
        if (errcode < 0) {
                dev_err(&usbduxsub->interface->dev, "comedi_: upload failed\n");
@@ -1114,7 +1110,7 @@ static int send_dux_commands(struct usbduxsub *this_usbduxsub, int cmd_type)
                              usb_sndbulkpipe(this_usbduxsub->usbdev,
                                              COMMAND_OUT_EP),
                              this_usbduxsub->dux_commands, SIZEOFDUXBUFFER,
-                             &nsent, BULK_TIMEOUT);
+                             &nsent, 10);
        if (result < 0)
                dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
                        "could not transmit dux_command to the usb-device, "
@@ -1134,7 +1130,7 @@ static int receive_dux_commands(struct usbduxsub *this_usbduxsub, int command)
                                      usb_rcvbulkpipe(this_usbduxsub->usbdev,
                                                      COMMAND_IN_EP),
                                      this_usbduxsub->insnBuffer, SIZEINSNBUF,
-                                     &nrec, BULK_TIMEOUT);
+                                     &nrec, 1);
                if (result < 0) {
                        dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
                                "insn: USB error %d while receiving DUX command"
index f6e04f83cd232cf0cbd3af979d02949eb8cfa432..ef8fcc8c67bd7db85cbc14607967a70e26487147 100644 (file)
@@ -202,7 +202,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
     t->value = temp;                                                   \
     return count;                                                      \
   }                                                                    \
-  static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value);
+  static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
 
 show_int(enable);
 show_int(offline);
index b12237f90db24e1e63a2013af0b57a1d130b7b96..c2809f2a2ce0e0e72b1b930f39929a50179d3cd5 100644 (file)
@@ -306,9 +306,9 @@ void HvCleanup(void)
        DPRINT_ENTER(VMBUS);
 
        if (gHvContext.SignalEventBuffer) {
-               kfree(gHvContext.SignalEventBuffer);
                gHvContext.SignalEventBuffer = NULL;
                gHvContext.SignalEventParam = NULL;
+               kfree(gHvContext.SignalEventBuffer);
        }
 
        if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
index 3a38103ecfbd7c74126650df841d6f5f535e4304..f69ae33a91e3788a8db90f9d67f738a3a1430454 100644 (file)
@@ -192,7 +192,7 @@ Description:
 static inline u64
 GetRingBufferIndices(RING_BUFFER_INFO* RingInfo)
 {
-       return (u64)RingInfo->RingBuffer->WriteIndex << 32;
+       return ((u64)RingInfo->RingBuffer->WriteIndex << 32) || RingInfo->RingBuffer->ReadIndex;
 }
 
 
index f05f4e125c48893a2b249fd0b2eb1b6452570bdd..26d79975387cc0b09e3335b8aeffc1b28af457ab 100644 (file)
@@ -756,7 +756,6 @@ static int RndisFilterOpenDevice(struct rndis_device *Device)
 
        ret = RndisFilterSetPacketFilter(Device,
                                         NDIS_PACKET_TYPE_BROADCAST |
-                                        NDIS_PACKET_TYPE_ALL_MULTICAST |
                                         NDIS_PACKET_TYPE_DIRECTED);
        if (ret == 0)
                Device->State = RNDIS_DEV_DATAINITIALIZED;
index 3d8ff086fc7f40cc1c986234d394e90b789cddf6..69c14066c479e1a7106e5a2dcacecc6f34f3515c 100644 (file)
 #include "VmbusApi.h"
 
 /* Defines */
-#define STORVSC_RING_BUFFER_SIZE                       (20*PAGE_SIZE)
+#define STORVSC_RING_BUFFER_SIZE                       (10*PAGE_SIZE)
 #define BLKVSC_RING_BUFFER_SIZE                                (20*PAGE_SIZE)
 
-#define STORVSC_MAX_IO_REQUESTS                                128
+#define STORVSC_MAX_IO_REQUESTS                                64
 
 /*
  * In Hyper-V, each port/path/target maps to 1 scsi host adapter.  In
index 547261d2537c662da0a2a5e9eeeeab7d230fb76b..0d7459e2d0360b8996b104e1385df56bd9fd8150 100644 (file)
@@ -392,9 +392,6 @@ static const struct net_device_ops device_ops = {
        .ndo_start_xmit =               netvsc_start_xmit,
        .ndo_get_stats =                netvsc_get_stats,
        .ndo_set_multicast_list =       netvsc_set_multicast_list,
-       .ndo_change_mtu =               eth_change_mtu,
-       .ndo_validate_addr =            eth_validate_addr,
-       .ndo_set_mac_address =          eth_mac_addr,
 };
 
 static int netvsc_probe(struct device *device)
@@ -416,7 +413,8 @@ static int netvsc_probe(struct device *device)
        if (!net_drv_obj->Base.OnDeviceAdd)
                return -1;
 
-       net = alloc_etherdev(sizeof(struct net_device_context));
+       net = alloc_netdev(sizeof(struct net_device_context), "seth%d",
+                          ether_setup);
        if (!net)
                return -1;
 
index 2a4b147b0b388aa1b718eb2fa1a471bfca64e0f7..d49dc21d4cb4a4117c570e5818dee8b2ec5a19eb 100644 (file)
@@ -532,7 +532,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 
                ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
 
-               if (bounce_addr == 0)
+               if (j == 0)
                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
 
                while (srclen) {
@@ -593,7 +593,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                destlen = orig_sgl[i].length;
                ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
 
-               if (bounce_addr == 0)
+               if (j == 0)
                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
 
                while (destlen) {
@@ -652,7 +652,6 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
        unsigned int request_size = 0;
        int i;
        struct scatterlist *sgl;
-       unsigned int sg_count = 0;
 
        DPRINT_ENTER(STORVSC_DRV);
 
@@ -737,7 +736,6 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
        request->DataBuffer.Length = scsi_bufflen(scmnd);
        if (scsi_sg_count(scmnd)) {
                sgl = (struct scatterlist *)scsi_sglist(scmnd);
-               sg_count = scsi_sg_count(scmnd);
 
                /* check if we need to bounce the sgl */
                if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -772,12 +770,11 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
                                              scsi_sg_count(scmnd));
 
                        sgl = cmd_request->bounce_sgl;
-                       sg_count = cmd_request->bounce_sgl_count;
                }
 
                request->DataBuffer.Offset = sgl[0].offset;
 
-               for (i = 0; i < sg_count; i++) {
+               for (i = 0; i < scsi_sg_count(scmnd); i++) {
                        DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d \n",
                                   i, sgl[i].length, sgl[i].offset);
                        request->DataBuffer.PfnArray[i] =
index 6acc49a55a5746150e9d392f8d8191939be3ead5..894eecfc63ca5bd3e8e84fb2b5bf11dc4680df05 100644 (file)
@@ -24,8 +24,6 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
-#include <linux/pci.h>
-#include <linux/dmi.h>
 #include "osd.h"
 #include "logging.h"
 #include "vmbus.h"
@@ -948,19 +946,6 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
        }
 }
 
-static struct dmi_system_id __initdata microsoft_hv_dmi_table[] = {
-       {
-               .ident = "Hyper-V",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
-                       DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
-               },
-       },
-       { },
-};
-MODULE_DEVICE_TABLE(dmi, microsoft_hv_dmi_table);
-
 static int __init vmbus_init(void)
 {
        int ret = 0;
@@ -972,9 +957,6 @@ static int __init vmbus_init(void)
                vmbus_loglevel, HIWORD(vmbus_loglevel), LOWORD(vmbus_loglevel));
        /* Todo: it is used for loglevel, to be ported to new kernel. */
 
-       if (!dmi_check_system(microsoft_hv_dmi_table))
-               return -ENODEV;
-
        ret = vmbus_bus_init(VmbusInitialize);
 
        DPRINT_EXIT(VMBUS_DRV);
@@ -991,18 +973,6 @@ static void __exit vmbus_exit(void)
        return;
 }
 
-/*
- * We use a PCI table to determine if we should autoload this driver  This is
- * needed by distro tools to determine if the hyperv drivers should be
- * installed and/or configured.  We don't do anything else with the table, but
- * it needs to be present.
- */
-const static struct pci_device_id microsoft_hv_pci_table[] = {
-       { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
-       { 0 }
-};
-MODULE_DEVICE_TABLE(pci, microsoft_hv_pci_table);
-
 MODULE_LICENSE("GPL");
 module_param(vmbus_irq, int, S_IRUGO);
 module_param(vmbus_loglevel, int, S_IRUGO);
index bc1ffbed3c8a30ae3421633f52a48826a8c3319d..7852d4a960c50fec42842826c209a135eb9dd71d 100644 (file)
@@ -2,7 +2,6 @@ config LINE6_USB
        tristate "Line6 USB support"
        depends on USB && SND
        select SND_RAWMIDI
-       select SND_PCM
        help
          This is a driver for the guitar amp, cab, and effects modeller
          PODxt Pro by Line6 (and similar devices), supporting the
index 13cb4c06b3c28afee025757c1a2fe28774d4357a..23ad08e17f84c49fbf0ccbde4bb2227f7233d3e8 100644 (file)
@@ -259,108 +259,108 @@ VARIAX_PARAM_R(float, mix2);
 VARIAX_PARAM_R(float, mix1);
 VARIAX_PARAM_R(int, pickup_wiring);
 
-static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak);
-static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO, pod_get_wah_position, pod_set_wah_position);
-static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO, pod_get_compression_gain, pod_set_compression_gain);
-static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO, pod_get_vol_pedal_position, pod_set_vol_pedal_position);
-static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO, pod_get_compression_threshold, pod_set_compression_threshold);
-static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan);
-static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO, pod_get_amp_model_setup, pod_set_amp_model_setup);
-static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO, pod_get_amp_model, pod_set_amp_model);
-static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive);
-static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass);
-static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid);
-static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
-static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO, pod_get_treble, pod_set_treble);
-static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO, pod_get_highmid, pod_set_highmid);
-static DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO, pod_get_chan_vol, pod_set_chan_vol);
-static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO, pod_get_reverb_mix, pod_set_reverb_mix);
-static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO, pod_get_effect_setup, pod_set_effect_setup);
-static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO, pod_get_band_1_frequency, pod_set_band_1_frequency);
-static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO, pod_get_presence, pod_set_presence);
-static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO, pod_get_treble__bass, pod_set_treble__bass);
-static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO, pod_get_noise_gate_enable, pod_set_noise_gate_enable);
-static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO, pod_get_gate_threshold, pod_set_gate_threshold);
-static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO, pod_get_gate_decay_time, pod_set_gate_decay_time);
-static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO, pod_get_stomp_enable, pod_set_stomp_enable);
-static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO, pod_get_comp_enable, pod_set_comp_enable);
-static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO, pod_get_stomp_time, pod_set_stomp_time);
-static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO, pod_get_delay_enable, pod_set_delay_enable);
-static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO, pod_get_mod_param_1, pod_set_mod_param_1);
-static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO, pod_get_delay_param_1, pod_set_delay_param_1);
-static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_delay_param_1_note_value, pod_set_delay_param_1_note_value);
-static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency__bass, pod_set_band_2_frequency__bass);
-static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO, pod_get_delay_param_2, pod_set_delay_param_2);
-static DEVICE_ATTR(delay_volume_mix, S_IWUSR | S_IRUGO, pod_get_delay_volume_mix, pod_set_delay_volume_mix);
-static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO, pod_get_delay_param_3, pod_set_delay_param_3);
-static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO, pod_get_reverb_enable, pod_set_reverb_enable);
-static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO, pod_get_reverb_type, pod_set_reverb_type);
-static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO, pod_get_reverb_decay, pod_set_reverb_decay);
-static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO, pod_get_reverb_tone, pod_set_reverb_tone);
-static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO, pod_get_reverb_pre_delay, pod_set_reverb_pre_delay);
-static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO, pod_get_reverb_pre_post, pod_set_reverb_pre_post);
-static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency, pod_set_band_2_frequency);
-static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency__bass, pod_set_band_3_frequency__bass);
-static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO, pod_get_wah_enable, pod_set_wah_enable);
-static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO, pod_get_modulation_lo_cut, pod_set_modulation_lo_cut);
-static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO, pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut);
-static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO, pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum);
-static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO, pod_get_eq_pre_post, pod_set_eq_pre_post);
-static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO, pod_get_volume_pre_post, pod_set_volume_pre_post);
-static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO, pod_get_di_model, pod_set_di_model);
-static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO, pod_get_di_delay, pod_set_di_delay);
-static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO, pod_get_mod_enable, pod_set_mod_enable);
-static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_mod_param_1_note_value, pod_set_mod_param_1_note_value);
-static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO, pod_get_mod_param_2, pod_set_mod_param_2);
-static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO, pod_get_mod_param_3, pod_set_mod_param_3);
-static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO, pod_get_mod_param_4, pod_set_mod_param_4);
-static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO, pod_get_mod_param_5, pod_set_mod_param_5);
-static DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO, pod_get_mod_volume_mix, pod_set_mod_volume_mix);
-static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO, pod_get_mod_pre_post, pod_set_mod_pre_post);
-static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO, pod_get_modulation_model, pod_set_modulation_model);
-static DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency, pod_set_band_3_frequency);
-static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency__bass, pod_set_band_4_frequency__bass);
-static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_mod_param_1_double_precision, pod_set_mod_param_1_double_precision);
-static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_delay_param_1_double_precision, pod_set_delay_param_1_double_precision);
-static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO, pod_get_eq_enable, pod_set_eq_enable);
-static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap);
-static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO, pod_get_volume_tweak_pedal_assign, pod_set_volume_tweak_pedal_assign);
-static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO, pod_get_band_5_frequency, pod_set_band_5_frequency);
-static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner);
-static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO, pod_get_mic_selection, pod_set_mic_selection);
-static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO, pod_get_cabinet_model, pod_set_cabinet_model);
-static DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO, pod_get_stomp_model, pod_set_stomp_model);
-static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO, pod_get_roomlevel, pod_set_roomlevel);
-static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency, pod_set_band_4_frequency);
-static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO, pod_get_band_6_frequency, pod_set_band_6_frequency);
-static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_stomp_param_1_note_value, pod_set_stomp_param_1_note_value);
-static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO, pod_get_stomp_param_2, pod_set_stomp_param_2);
-static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO, pod_get_stomp_param_3, pod_set_stomp_param_3);
-static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO, pod_get_stomp_param_4, pod_set_stomp_param_4);
-static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO, pod_get_stomp_param_5, pod_set_stomp_param_5);
-static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO, pod_get_stomp_param_6, pod_set_stomp_param_6);
-static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO, pod_get_amp_switch_select, pod_set_amp_switch_select);
-static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO, pod_get_delay_param_4, pod_set_delay_param_4);
-static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO, pod_get_delay_param_5, pod_set_delay_param_5);
-static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO, pod_get_delay_pre_post, pod_set_delay_pre_post);
-static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO, pod_get_delay_model, pod_set_delay_model);
-static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO, pod_get_delay_verb_model, pod_set_delay_verb_model);
-static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO, pod_get_tempo_msb, pod_set_tempo_msb);
-static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO, pod_get_tempo_lsb, pod_set_tempo_lsb);
-static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO, pod_get_wah_model, pod_set_wah_model);
-static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO, pod_get_bypass_volume, pod_set_bypass_volume);
-static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO, pod_get_fx_loop_on_off, pod_set_fx_loop_on_off);
-static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO, pod_get_tweak_param_select, pod_set_tweak_param_select);
-static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO, pod_get_amp1_engage, pod_set_amp1_engage);
-static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO, pod_get_band_1_gain, pod_set_band_1_gain);
-static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain__bass, pod_set_band_2_gain__bass);
-static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain, pod_set_band_2_gain);
-static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain__bass, pod_set_band_3_gain__bass);
-static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain, pod_set_band_3_gain);
-static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain__bass, pod_set_band_4_gain__bass);
-static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO, pod_get_band_5_gain__bass, pod_set_band_5_gain__bass);
-static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain, pod_set_band_4_gain);
-static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO, pod_get_band_6_gain__bass, pod_set_band_6_gain__bass);
+static DEVICE_ATTR(tweak, S_IWUGO | S_IRUGO, pod_get_tweak, pod_set_tweak);
+static DEVICE_ATTR(wah_position, S_IWUGO | S_IRUGO, pod_get_wah_position, pod_set_wah_position);
+static DEVICE_ATTR(compression_gain, S_IWUGO | S_IRUGO, pod_get_compression_gain, pod_set_compression_gain);
+static DEVICE_ATTR(vol_pedal_position, S_IWUGO | S_IRUGO, pod_get_vol_pedal_position, pod_set_vol_pedal_position);
+static DEVICE_ATTR(compression_threshold, S_IWUGO | S_IRUGO, pod_get_compression_threshold, pod_set_compression_threshold);
+static DEVICE_ATTR(pan, S_IWUGO | S_IRUGO, pod_get_pan, pod_set_pan);
+static DEVICE_ATTR(amp_model_setup, S_IWUGO | S_IRUGO, pod_get_amp_model_setup, pod_set_amp_model_setup);
+static DEVICE_ATTR(amp_model, S_IWUGO | S_IRUGO, pod_get_amp_model, pod_set_amp_model);
+static DEVICE_ATTR(drive, S_IWUGO | S_IRUGO, pod_get_drive, pod_set_drive);
+static DEVICE_ATTR(bass, S_IWUGO | S_IRUGO, pod_get_bass, pod_set_bass);
+static DEVICE_ATTR(mid, S_IWUGO | S_IRUGO, pod_get_mid, pod_set_mid);
+static DEVICE_ATTR(lowmid, S_IWUGO | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
+static DEVICE_ATTR(treble, S_IWUGO | S_IRUGO, pod_get_treble, pod_set_treble);
+static DEVICE_ATTR(highmid, S_IWUGO | S_IRUGO, pod_get_highmid, pod_set_highmid);
+static DEVICE_ATTR(chan_vol, S_IWUGO | S_IRUGO, pod_get_chan_vol, pod_set_chan_vol);
+static DEVICE_ATTR(reverb_mix, S_IWUGO | S_IRUGO, pod_get_reverb_mix, pod_set_reverb_mix);
+static DEVICE_ATTR(effect_setup, S_IWUGO | S_IRUGO, pod_get_effect_setup, pod_set_effect_setup);
+static DEVICE_ATTR(band_1_frequency, S_IWUGO | S_IRUGO, pod_get_band_1_frequency, pod_set_band_1_frequency);
+static DEVICE_ATTR(presence, S_IWUGO | S_IRUGO, pod_get_presence, pod_set_presence);
+static DEVICE_ATTR2(treble__bass, treble, S_IWUGO | S_IRUGO, pod_get_treble__bass, pod_set_treble__bass);
+static DEVICE_ATTR(noise_gate_enable, S_IWUGO | S_IRUGO, pod_get_noise_gate_enable, pod_set_noise_gate_enable);
+static DEVICE_ATTR(gate_threshold, S_IWUGO | S_IRUGO, pod_get_gate_threshold, pod_set_gate_threshold);
+static DEVICE_ATTR(gate_decay_time, S_IWUGO | S_IRUGO, pod_get_gate_decay_time, pod_set_gate_decay_time);
+static DEVICE_ATTR(stomp_enable, S_IWUGO | S_IRUGO, pod_get_stomp_enable, pod_set_stomp_enable);
+static DEVICE_ATTR(comp_enable, S_IWUGO | S_IRUGO, pod_get_comp_enable, pod_set_comp_enable);
+static DEVICE_ATTR(stomp_time, S_IWUGO | S_IRUGO, pod_get_stomp_time, pod_set_stomp_time);
+static DEVICE_ATTR(delay_enable, S_IWUGO | S_IRUGO, pod_get_delay_enable, pod_set_delay_enable);
+static DEVICE_ATTR(mod_param_1, S_IWUGO | S_IRUGO, pod_get_mod_param_1, pod_set_mod_param_1);
+static DEVICE_ATTR(delay_param_1, S_IWUGO | S_IRUGO, pod_get_delay_param_1, pod_set_delay_param_1);
+static DEVICE_ATTR(delay_param_1_note_value, S_IWUGO | S_IRUGO, pod_get_delay_param_1_note_value, pod_set_delay_param_1_note_value);
+static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUGO | S_IRUGO, pod_get_band_2_frequency__bass, pod_set_band_2_frequency__bass);
+static DEVICE_ATTR(delay_param_2, S_IWUGO | S_IRUGO, pod_get_delay_param_2, pod_set_delay_param_2);
+static DEVICE_ATTR(delay_volume_mix, S_IWUGO | S_IRUGO, pod_get_delay_volume_mix, pod_set_delay_volume_mix);
+static DEVICE_ATTR(delay_param_3, S_IWUGO | S_IRUGO, pod_get_delay_param_3, pod_set_delay_param_3);
+static DEVICE_ATTR(reverb_enable, S_IWUGO | S_IRUGO, pod_get_reverb_enable, pod_set_reverb_enable);
+static DEVICE_ATTR(reverb_type, S_IWUGO | S_IRUGO, pod_get_reverb_type, pod_set_reverb_type);
+static DEVICE_ATTR(reverb_decay, S_IWUGO | S_IRUGO, pod_get_reverb_decay, pod_set_reverb_decay);
+static DEVICE_ATTR(reverb_tone, S_IWUGO | S_IRUGO, pod_get_reverb_tone, pod_set_reverb_tone);
+static DEVICE_ATTR(reverb_pre_delay, S_IWUGO | S_IRUGO, pod_get_reverb_pre_delay, pod_set_reverb_pre_delay);
+static DEVICE_ATTR(reverb_pre_post, S_IWUGO | S_IRUGO, pod_get_reverb_pre_post, pod_set_reverb_pre_post);
+static DEVICE_ATTR(band_2_frequency, S_IWUGO | S_IRUGO, pod_get_band_2_frequency, pod_set_band_2_frequency);
+static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUGO | S_IRUGO, pod_get_band_3_frequency__bass, pod_set_band_3_frequency__bass);
+static DEVICE_ATTR(wah_enable, S_IWUGO | S_IRUGO, pod_get_wah_enable, pod_set_wah_enable);
+static DEVICE_ATTR(modulation_lo_cut, S_IWUGO | S_IRUGO, pod_get_modulation_lo_cut, pod_set_modulation_lo_cut);
+static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUGO | S_IRUGO, pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut);
+static DEVICE_ATTR(volume_pedal_minimum, S_IWUGO | S_IRUGO, pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum);
+static DEVICE_ATTR(eq_pre_post, S_IWUGO | S_IRUGO, pod_get_eq_pre_post, pod_set_eq_pre_post);
+static DEVICE_ATTR(volume_pre_post, S_IWUGO | S_IRUGO, pod_get_volume_pre_post, pod_set_volume_pre_post);
+static DEVICE_ATTR(di_model, S_IWUGO | S_IRUGO, pod_get_di_model, pod_set_di_model);
+static DEVICE_ATTR(di_delay, S_IWUGO | S_IRUGO, pod_get_di_delay, pod_set_di_delay);
+static DEVICE_ATTR(mod_enable, S_IWUGO | S_IRUGO, pod_get_mod_enable, pod_set_mod_enable);
+static DEVICE_ATTR(mod_param_1_note_value, S_IWUGO | S_IRUGO, pod_get_mod_param_1_note_value, pod_set_mod_param_1_note_value);
+static DEVICE_ATTR(mod_param_2, S_IWUGO | S_IRUGO, pod_get_mod_param_2, pod_set_mod_param_2);
+static DEVICE_ATTR(mod_param_3, S_IWUGO | S_IRUGO, pod_get_mod_param_3, pod_set_mod_param_3);
+static DEVICE_ATTR(mod_param_4, S_IWUGO | S_IRUGO, pod_get_mod_param_4, pod_set_mod_param_4);
+static DEVICE_ATTR(mod_param_5, S_IWUGO | S_IRUGO, pod_get_mod_param_5, pod_set_mod_param_5);
+static DEVICE_ATTR(mod_volume_mix, S_IWUGO | S_IRUGO, pod_get_mod_volume_mix, pod_set_mod_volume_mix);
+static DEVICE_ATTR(mod_pre_post, S_IWUGO | S_IRUGO, pod_get_mod_pre_post, pod_set_mod_pre_post);
+static DEVICE_ATTR(modulation_model, S_IWUGO | S_IRUGO, pod_get_modulation_model, pod_set_modulation_model);
+static DEVICE_ATTR(band_3_frequency, S_IWUGO | S_IRUGO, pod_get_band_3_frequency, pod_set_band_3_frequency);
+static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUGO | S_IRUGO, pod_get_band_4_frequency__bass, pod_set_band_4_frequency__bass);
+static DEVICE_ATTR(mod_param_1_double_precision, S_IWUGO | S_IRUGO, pod_get_mod_param_1_double_precision, pod_set_mod_param_1_double_precision);
+static DEVICE_ATTR(delay_param_1_double_precision, S_IWUGO | S_IRUGO, pod_get_delay_param_1_double_precision, pod_set_delay_param_1_double_precision);
+static DEVICE_ATTR(eq_enable, S_IWUGO | S_IRUGO, pod_get_eq_enable, pod_set_eq_enable);
+static DEVICE_ATTR(tap, S_IWUGO | S_IRUGO, pod_get_tap, pod_set_tap);
+static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUGO | S_IRUGO, pod_get_volume_tweak_pedal_assign, pod_set_volume_tweak_pedal_assign);
+static DEVICE_ATTR(band_5_frequency, S_IWUGO | S_IRUGO, pod_get_band_5_frequency, pod_set_band_5_frequency);
+static DEVICE_ATTR(tuner, S_IWUGO | S_IRUGO, pod_get_tuner, pod_set_tuner);
+static DEVICE_ATTR(mic_selection, S_IWUGO | S_IRUGO, pod_get_mic_selection, pod_set_mic_selection);
+static DEVICE_ATTR(cabinet_model, S_IWUGO | S_IRUGO, pod_get_cabinet_model, pod_set_cabinet_model);
+static DEVICE_ATTR(stomp_model, S_IWUGO | S_IRUGO, pod_get_stomp_model, pod_set_stomp_model);
+static DEVICE_ATTR(roomlevel, S_IWUGO | S_IRUGO, pod_get_roomlevel, pod_set_roomlevel);
+static DEVICE_ATTR(band_4_frequency, S_IWUGO | S_IRUGO, pod_get_band_4_frequency, pod_set_band_4_frequency);
+static DEVICE_ATTR(band_6_frequency, S_IWUGO | S_IRUGO, pod_get_band_6_frequency, pod_set_band_6_frequency);
+static DEVICE_ATTR(stomp_param_1_note_value, S_IWUGO | S_IRUGO, pod_get_stomp_param_1_note_value, pod_set_stomp_param_1_note_value);
+static DEVICE_ATTR(stomp_param_2, S_IWUGO | S_IRUGO, pod_get_stomp_param_2, pod_set_stomp_param_2);
+static DEVICE_ATTR(stomp_param_3, S_IWUGO | S_IRUGO, pod_get_stomp_param_3, pod_set_stomp_param_3);
+static DEVICE_ATTR(stomp_param_4, S_IWUGO | S_IRUGO, pod_get_stomp_param_4, pod_set_stomp_param_4);
+static DEVICE_ATTR(stomp_param_5, S_IWUGO | S_IRUGO, pod_get_stomp_param_5, pod_set_stomp_param_5);
+static DEVICE_ATTR(stomp_param_6, S_IWUGO | S_IRUGO, pod_get_stomp_param_6, pod_set_stomp_param_6);
+static DEVICE_ATTR(amp_switch_select, S_IWUGO | S_IRUGO, pod_get_amp_switch_select, pod_set_amp_switch_select);
+static DEVICE_ATTR(delay_param_4, S_IWUGO | S_IRUGO, pod_get_delay_param_4, pod_set_delay_param_4);
+static DEVICE_ATTR(delay_param_5, S_IWUGO | S_IRUGO, pod_get_delay_param_5, pod_set_delay_param_5);
+static DEVICE_ATTR(delay_pre_post, S_IWUGO | S_IRUGO, pod_get_delay_pre_post, pod_set_delay_pre_post);
+static DEVICE_ATTR(delay_model, S_IWUGO | S_IRUGO, pod_get_delay_model, pod_set_delay_model);
+static DEVICE_ATTR(delay_verb_model, S_IWUGO | S_IRUGO, pod_get_delay_verb_model, pod_set_delay_verb_model);
+static DEVICE_ATTR(tempo_msb, S_IWUGO | S_IRUGO, pod_get_tempo_msb, pod_set_tempo_msb);
+static DEVICE_ATTR(tempo_lsb, S_IWUGO | S_IRUGO, pod_get_tempo_lsb, pod_set_tempo_lsb);
+static DEVICE_ATTR(wah_model, S_IWUGO | S_IRUGO, pod_get_wah_model, pod_set_wah_model);
+static DEVICE_ATTR(bypass_volume, S_IWUGO | S_IRUGO, pod_get_bypass_volume, pod_set_bypass_volume);
+static DEVICE_ATTR(fx_loop_on_off, S_IWUGO | S_IRUGO, pod_get_fx_loop_on_off, pod_set_fx_loop_on_off);
+static DEVICE_ATTR(tweak_param_select, S_IWUGO | S_IRUGO, pod_get_tweak_param_select, pod_set_tweak_param_select);
+static DEVICE_ATTR(amp1_engage, S_IWUGO | S_IRUGO, pod_get_amp1_engage, pod_set_amp1_engage);
+static DEVICE_ATTR(band_1_gain, S_IWUGO | S_IRUGO, pod_get_band_1_gain, pod_set_band_1_gain);
+static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUGO | S_IRUGO, pod_get_band_2_gain__bass, pod_set_band_2_gain__bass);
+static DEVICE_ATTR(band_2_gain, S_IWUGO | S_IRUGO, pod_get_band_2_gain, pod_set_band_2_gain);
+static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain__bass, pod_set_band_3_gain__bass);
+static DEVICE_ATTR(band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain, pod_set_band_3_gain);
+static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain__bass, pod_set_band_4_gain__bass);
+static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUGO | S_IRUGO, pod_get_band_5_gain__bass, pod_set_band_5_gain__bass);
+static DEVICE_ATTR(band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain, pod_set_band_4_gain);
+static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUGO | S_IRUGO, pod_get_band_6_gain__bass, pod_set_band_6_gain__bass);
 static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write);
 static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable, line6_nop_write);
 static DEVICE_ATTR(pickup1_type, S_IRUGO, variax_get_pickup1_type, line6_nop_write);
index a3b877edbaefb0df7c495e5df5b56d774c377949..89a2b17e9cafc78744752ae41edcc062184db98b 100644 (file)
@@ -349,8 +349,8 @@ static ssize_t midi_set_midi_mask_receive(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(midi_mask_transmit, S_IWUSR | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
-static DEVICE_ATTR(midi_mask_receive, S_IWUSR | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive);
+static DEVICE_ATTR(midi_mask_transmit, S_IWUGO | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
+static DEVICE_ATTR(midi_mask_receive, S_IWUGO | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive);
 
 /* MIDI device destructor */
 static int snd_line6_midi_free(struct snd_device *device)
index 875d75a74f4ba28f371b58d6341c3ac3672febd1..4c5b9d58400041f588241ee48fe250c192652753 100644 (file)
@@ -912,33 +912,33 @@ POD_GET_SYSTEM_PARAM(tuner_pitch, 1, 1);
 #undef GET_SYSTEM_PARAM
 
 /* POD special files: */
-static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel, pod_set_channel);
+static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, pod_get_channel, pod_set_channel);
 static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write);
 static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write);
 static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write);
-static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, pod_get_dump, pod_set_dump);
-static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf);
-static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish);
+static DEVICE_ATTR(dump, S_IWUGO | S_IRUGO, pod_get_dump, pod_set_dump);
+static DEVICE_ATTR(dump_buf, S_IWUGO | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf);
+static DEVICE_ATTR(finish, S_IWUGO, line6_nop_read, pod_set_finish);
 static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version, line6_nop_write);
-static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess);
-static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level);
+static DEVICE_ATTR(midi_postprocess, S_IWUGO | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess);
+static DEVICE_ATTR(monitor_level, S_IWUGO | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level);
 static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write);
 static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write);
-static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_amp_setup);
-static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read, pod_set_retrieve_channel);
-static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_effects_setup);
-static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing, pod_set_routing);
+static DEVICE_ATTR(retrieve_amp_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_amp_setup);
+static DEVICE_ATTR(retrieve_channel, S_IWUGO, line6_nop_read, pod_set_retrieve_channel);
+static DEVICE_ATTR(retrieve_effects_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_effects_setup);
+static DEVICE_ATTR(routing, S_IWUGO | S_IRUGO, pod_get_routing, pod_set_routing);
 static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number, line6_nop_write);
-static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read, pod_set_store_amp_setup);
-static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read, pod_set_store_channel);
-static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read, pod_set_store_effects_setup);
-static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq);
-static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute);
+static DEVICE_ATTR(store_amp_setup, S_IWUGO, line6_nop_read, pod_set_store_amp_setup);
+static DEVICE_ATTR(store_channel, S_IWUGO, line6_nop_read, pod_set_store_channel);
+static DEVICE_ATTR(store_effects_setup, S_IWUGO, line6_nop_read, pod_set_store_effects_setup);
+static DEVICE_ATTR(tuner_freq, S_IWUGO | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq);
+static DEVICE_ATTR(tuner_mute, S_IWUGO | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute);
 static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write);
 static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write);
 
 #if CREATE_RAW_FILE
-static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
+static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
 #endif
 
 /*
index 714687633dc8d8055048e58ebee20ed6808c9d47..eaa1229002aac2d8e46c54589296ebf6f029ebff 100644 (file)
@@ -117,8 +117,8 @@ static ssize_t toneport_set_led_green(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(led_red, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_red);
-static DEVICE_ATTR(led_green, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_green);
+static DEVICE_ATTR(led_red, S_IWUGO | S_IRUGO, line6_nop_read, toneport_set_led_red);
+static DEVICE_ATTR(led_green, S_IWUGO | S_IRUGO, line6_nop_read, toneport_set_led_green);
 
 
 static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2)
index 12af54da4636a7e62cab7464920e42e5ba7b4e41..f9d96984733a8b71aa116d0895037968e3d0c457 100644 (file)
@@ -366,17 +366,17 @@ static ssize_t variax_set_raw2(struct device *dev,
 #endif
 
 /* Variax workbench special files: */
-static DEVICE_ATTR(model, S_IWUSR | S_IRUGO, variax_get_model, variax_set_model);
-static DEVICE_ATTR(volume, S_IWUSR | S_IRUGO, variax_get_volume, variax_set_volume);
-static DEVICE_ATTR(tone, S_IWUSR | S_IRUGO, variax_get_tone, variax_set_tone);
+static DEVICE_ATTR(model, S_IWUGO | S_IRUGO, variax_get_model, variax_set_model);
+static DEVICE_ATTR(volume, S_IWUGO | S_IRUGO, variax_get_volume, variax_set_volume);
+static DEVICE_ATTR(tone, S_IWUGO | S_IRUGO, variax_get_tone, variax_set_tone);
 static DEVICE_ATTR(name, S_IRUGO, variax_get_name, line6_nop_write);
 static DEVICE_ATTR(bank, S_IRUGO, variax_get_bank, line6_nop_write);
 static DEVICE_ATTR(dump, S_IRUGO, variax_get_dump, line6_nop_write);
-static DEVICE_ATTR(active, S_IWUSR | S_IRUGO, variax_get_active, variax_set_active);
+static DEVICE_ATTR(active, S_IWUGO | S_IRUGO, variax_get_active, variax_set_active);
 
 #if CREATE_RAW_FILE
-static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
-static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2);
+static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
+static DEVICE_ATTR(raw2, S_IWUGO, line6_nop_read, variax_set_raw2);
 #endif
 
 
diff --git a/drivers/staging/mimio/Kconfig b/drivers/staging/mimio/Kconfig
new file mode 100644 (file)
index 0000000..505dcb2
--- /dev/null
@@ -0,0 +1,10 @@
+config INPUT_MIMIO
+       tristate "Mimio Xi interactive whiteboard support"
+       depends on USB && INPUT
+       default N
+       help
+         Say Y here if you want to use a Mimio Xi interactive
+         whiteboard device.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mimio.
diff --git a/drivers/staging/mimio/Makefile b/drivers/staging/mimio/Makefile
new file mode 100644 (file)
index 0000000..77807ee
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_INPUT_MIMIO)      += mimio.o
diff --git a/drivers/staging/mimio/mimio.c b/drivers/staging/mimio/mimio.c
new file mode 100644 (file)
index 0000000..1ba8103
--- /dev/null
@@ -0,0 +1,914 @@
+/*
+ * Hardware event => input event mapping:
+ *
+ *
+ *
+ input.h:#define BTN_TOOL_PEN            0x140 black
+ input.h:#define BTN_TOOL_RUBBER         0x141 blue
+ input.h:#define BTN_TOOL_BRUSH          0x142 green
+ input.h:#define BTN_TOOL_PENCIL         0x143 red
+ input.h:#define BTN_TOOL_AIRBRUSH       0x144 eraser
+ input.h:#define BTN_TOOL_FINGER         0x145 small eraser
+ input.h:#define BTN_TOOL_MOUSE          0x146 mimio interactive
+ input.h:#define BTN_TOOL_LENS           0x147 mimio interactive but1
+ input.h:#define LOCALBTN_TOOL_EXTRA1    0x14a mimio interactive but2 == BTN_TOUCH
+ input.h:#define LOCALBTN_TOOL_EXTRA2    0x14b mimio extra pens (orange, brown, yellow, purple) == BTN_STYLUS
+ input.h:#define LOCALBTN_TOOL_EXTRA3    0x14c unused == BTN_STYLUS2
+ input.h:#define BTN_TOOL_DOUBLETAP      0x14d unused
+ input.h:#define BTN_TOOL_TRIPLETAP      0x14e unused
+ *
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_K)     => EV_KEY BIT(BTN_TOOL_PEN)
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_B)     => EV_KEY BIT(BTN_TOOL_RUBBER)
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_G)     => EV_KEY BIT(BTN_TOOL_BRUSH)
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_R)     => EV_KEY BIT(BTN_TOOL_PENCIL)
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_E)     => EV_KEY BIT(BTN_TOOL_AIRBRUSH)
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_ES)    => EV_KEY BIT(BTN_TOOL_FINGER)
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_I)     => EV_KEY BIT(BTN_TOOL_MOUSE)
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_IL)    => EV_KEY BIT(BTN_TOOL_LENS)
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_IR)    => EV_KEY BIT(BTN_TOOL_DOUBLETAP)
+ * MIMIO_EV_PENDOWN(MIMIO_PEN_EX)    => EV_KEY BIT(BTN_TOOL_TRIPLETAP)
+ * MIMIO_EV_PENDATA                 => EV_ABS BIT(ABS_X), BIT(ABS_Y)
+ * MIMIO_EV_MEMRESET              => EV_KEY BIT(BTN_0)
+ * MIMIO_EV_ACC(ACC_NEWPAGE)       => EV_KEY BIT(BTN_1)
+ * MIMIO_EV_ACC(ACC_TAGPAGE)      => EV_KEY BIT(BTN_2)
+ * MIMIO_EV_ACC(ACC_PRINTPAGE)      => EV_KEY BIT(BTN_3)
+ * MIMIO_EV_ACC(ACC_MAXIMIZE)      => EV_KEY BIT(BTN_4)
+ * MIMIO_EV_ACC(ACC_FINDCTLPNL)      => EV_KEY BIT(BTN_5)
+ *
+ *
+ * open issues:
+ *      - cold-load of data captured when mimio in standalone mode not yet
+ *         supported; need to snoop Win32 box to see datastream for this.
+ *       - mimio mouse not yet supported; need to snoop Win32 box to see the
+ *         datastream for this.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/input.h>
+#include <linux/usb.h>
+
+#define DRIVER_VERSION         "v0.031"
+#define DRIVER_AUTHOR          "mwilder@cs.nmsu.edu"
+#define DRIVER_DESC            "USB mimio-xi driver"
+
+enum {UPVALUE, DOWNVALUE, MOVEVALUE};
+
+#define MIMIO_XRANGE_MAX       9600
+#define MIMIO_YRANGE_MAX       4800
+
+#define LOCALBTN_TOOL_EXTRA1   BTN_TOUCH
+#define LOCALBTN_TOOL_EXTRA2   BTN_STYLUS
+#define LOCALBTN_TOOL_EXTRA3   BTN_STYLUS2
+
+#define MIMIO_VENDOR_ID                0x08d3
+#define MIMIO_PRODUCT_ID       0x0001
+#define MIMIO_MAXPAYLOAD       (8)
+#define MIMIO_MAXNAMELEN       (64)
+#define MIMIO_TXWAIT           (1)
+#define MIMIO_TXDONE           (2)
+
+#define MIMIO_EV_PENDOWN       (0x22)
+#define MIMIO_EV_PENDATA       (0x24)
+#define MIMIO_EV_PENUP         (0x51)
+#define MIMIO_EV_MEMRESET      (0x45)
+#define MIMIO_EV_ACC           (0xb2)
+
+#define MIMIO_PEN_K            (1)     /* black pen */
+#define MIMIO_PEN_B            (2)     /* blue pen */
+#define MIMIO_PEN_G            (3)     /* green pen */
+#define MIMIO_PEN_R            (4)     /* red pen */
+/* 5, 6, 7, 8 are extra pens */
+#define MIMIO_PEN_E            (9)     /* big eraser */
+#define MIMIO_PEN_ES           (10)    /* lil eraser */
+#define MIMIO_PENJUMP_START    (10)
+#define MIMIO_PENJUMP          (6)
+#define MIMIO_PEN_I            (17)    /* mimio interactive */
+#define MIMIO_PEN_IL           (18)    /* mimio interactive button 1 */
+#define MIMIO_PEN_IR           (19)    /* mimio interactive button 2 */
+
+#define MIMIO_PEN_MAX          (MIMIO_PEN_IR)
+
+#define ACC_DONE               (0)
+#define ACC_NEWPAGE            (1)
+#define ACC_TAGPAGE            (2)
+#define ACC_PRINTPAGE          (4)
+#define ACC_MAXIMIZE           (8)
+#define ACC_FINDCTLPNL         (16)
+
+#define isvalidtxsize(n)       ((n) > 0 && (n) <= MIMIO_MAXPAYLOAD)
+
+
+struct pktbuf {
+       unsigned char instr;
+       unsigned char buf[16];
+       unsigned char *p;
+       unsigned char *q;
+};
+
+struct usbintendpt {
+       dma_addr_t dma;
+       struct urb *urb;
+       unsigned char *buf;
+       struct usb_endpoint_descriptor *desc;
+};
+
+struct mimio {
+       struct input_dev *idev;
+       struct usb_device *udev;
+       struct usb_interface *uifc;
+       int open;
+       int present;
+       int greeted;
+       int txflags;
+       char phys[MIMIO_MAXNAMELEN];
+       struct usbintendpt in;
+       struct usbintendpt out;
+       struct pktbuf pktbuf;
+       unsigned char minor;
+       wait_queue_head_t waitq;
+       spinlock_t txlock;
+       void (*rxhandler)(struct mimio *, unsigned char *, unsigned int);
+       int last_pen_down;
+};
+
+static void mimio_close(struct input_dev *);
+static void mimio_dealloc(struct mimio *);
+static void mimio_disconnect(struct usb_interface *);
+static int mimio_greet(struct mimio *);
+static void mimio_irq_in(struct urb *);
+static void mimio_irq_out(struct urb *);
+static int mimio_open(struct input_dev *);
+static int mimio_probe(struct usb_interface *, const struct usb_device_id *);
+static void mimio_rx_handler(struct mimio *, unsigned char *, unsigned int);
+static int mimio_tx(struct mimio *, const char *, int);
+
+static char mimio_name[] = "VirtualInk mimio-Xi";
+static struct usb_device_id mimio_table [] = {
+       { USB_DEVICE(MIMIO_VENDOR_ID, MIMIO_PRODUCT_ID) },
+       { USB_DEVICE(0x0525, 0xa4a0) }, /* gadget zero firmware */
+       { }
+};
+
+MODULE_DEVICE_TABLE(usb, mimio_table);
+
+static struct usb_driver mimio_driver = {
+       .name = "mimio",
+       .probe = mimio_probe,
+       .disconnect = mimio_disconnect,
+       .id_table = mimio_table,
+};
+
+static DECLARE_MUTEX(disconnect_sem);
+
+static void mimio_close(struct input_dev *idev)
+{
+       struct mimio *mimio;
+
+       mimio = input_get_drvdata(idev);
+       if (!mimio) {
+               dev_err(&idev->dev, "null mimio attached to input device\n");
+               return;
+       }
+
+       if (mimio->open <= 0)
+               dev_err(&idev->dev, "mimio not open.\n");
+       else
+               mimio->open--;
+
+       if (mimio->present == 0 && mimio->open == 0)
+               mimio_dealloc(mimio);
+}
+
+static void mimio_dealloc(struct mimio *mimio)
+{
+       if (mimio == NULL)
+               return;
+
+       usb_kill_urb(mimio->in.urb);
+
+       usb_kill_urb(mimio->out.urb);
+
+       if (mimio->idev) {
+               input_unregister_device(mimio->idev);
+               if (mimio->idev->grab)
+                       input_close_device(mimio->idev->grab);
+               else
+                       dev_dbg(&mimio->idev->dev, "mimio->idev->grab == NULL"
+                               " -- didn't call input_close_device\n");
+       }
+
+       usb_free_urb(mimio->in.urb);
+
+       usb_free_urb(mimio->out.urb);
+
+       if (mimio->in.buf) {
+               usb_buffer_free(mimio->udev, MIMIO_MAXPAYLOAD, mimio->in.buf,
+                               mimio->in.dma);
+       }
+
+       if (mimio->out.buf)
+               usb_buffer_free(mimio->udev, MIMIO_MAXPAYLOAD, mimio->out.buf,
+                               mimio->out.dma);
+
+       if (mimio->idev)
+               input_free_device(mimio->idev);
+
+       kfree(mimio);
+}
+
+static void mimio_disconnect(struct usb_interface *ifc)
+{
+       struct mimio *mimio;
+
+       down(&disconnect_sem);
+
+       mimio = usb_get_intfdata(ifc);
+       usb_set_intfdata(ifc, NULL);
+       dev_dbg(&mimio->idev->dev, "disconnect\n");
+
+       if (mimio) {
+               mimio->present = 0;
+
+               if (mimio->open <= 0)
+                       mimio_dealloc(mimio);
+       }
+
+       up(&disconnect_sem);
+}
+
+static int mimio_greet(struct mimio *mimio)
+{
+       const struct grtpkt {
+               int nbytes;
+               unsigned delay;
+               char data[8];
+       } grtpkts[] = {
+               { 3, 0, { 0x11, 0x55, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00 } },
+               { 5, 0, { 0x53, 0x55, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00 } },
+               { 5, 0, { 0x43, 0x55, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00 } },
+               { 5, 0, { 0x33, 0x55, 0x00, 0x00, 0x66, 0x00, 0x00, 0x00 } },
+               { 5, 0, { 0x13, 0x00, 0x5e, 0x02, 0x4f, 0x00, 0x00, 0x00 } },
+               { 5, 0, { 0x13, 0x00, 0x04, 0x03, 0x14, 0x00, 0x00, 0x00 } },
+               { 5, 2, { 0x13, 0x00, 0x00, 0x04, 0x17, 0x00, 0x00, 0x00 } },
+               { 5, 0, { 0x13, 0x00, 0x0d, 0x08, 0x16, 0x00, 0x00, 0x00 } },
+               { 5, 0, { 0x13, 0x00, 0x4d, 0x01, 0x5f, 0x00, 0x00, 0x00 } },
+               { 3, 0, { 0xf1, 0x55, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00 } },
+               { 7, 2, { 0x52, 0x55, 0x00, 0x07, 0x31, 0x55, 0x64, 0x00 } },
+               { 0, 0, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } },
+       };
+       int rslt;
+       const struct grtpkt *pkt;
+
+       for (pkt = grtpkts; pkt->nbytes; pkt++) {
+               rslt = mimio_tx(mimio, pkt->data, pkt->nbytes);
+               if (rslt)
+                       return rslt;
+               if (pkt->delay)
+                       msleep(pkt->delay);
+       }
+
+       return 0;
+}
+
+static void mimio_irq_in(struct urb *urb)
+{
+       int rslt;
+       char *data;
+       const char *reason = "going down";
+       struct mimio *mimio;
+
+       mimio = urb->context;
+
+       if (mimio == NULL)
+               /* paranoia */
+               return;
+
+       switch (urb->status) {
+       case 0:
+               /* success */
+               break;
+       case -ETIMEDOUT:
+               reason = "timeout -- unplugged?";
+       case -ECONNRESET:
+       case -ENOENT:
+       case -ESHUTDOWN:
+               dev_dbg(&mimio->idev->dev, "%s.\n", reason);
+               return;
+       default:
+               dev_dbg(&mimio->idev->dev, "unknown urb-status: %d.\n",
+                       urb->status);
+               goto exit;
+       }
+       data = mimio->in.buf;
+
+       if (mimio->rxhandler)
+               mimio->rxhandler(mimio, data, urb->actual_length);
+exit:
+       /*
+        * Keep listening to device on same urb.
+        */
+       rslt = usb_submit_urb(urb, GFP_ATOMIC);
+       if (rslt)
+               dev_err(&mimio->idev->dev, "usb_submit_urb failure: %d.\n",
+                       rslt);
+}
+
+static void mimio_irq_out(struct urb *urb)
+{
+       unsigned long flags;
+       struct mimio *mimio;
+
+       mimio = urb->context;
+
+       if (urb->status)
+               dev_dbg(&mimio->idev->dev, "urb-status: %d.\n", urb->status);
+
+       spin_lock_irqsave(&mimio->txlock, flags);
+       mimio->txflags |= MIMIO_TXDONE;
+       spin_unlock_irqrestore(&mimio->txlock, flags);
+       wmb();
+       wake_up(&mimio->waitq);
+}
+
+static int mimio_open(struct input_dev *idev)
+{
+       int rslt;
+       struct mimio *mimio;
+
+       rslt = 0;
+       down(&disconnect_sem);
+       mimio = input_get_drvdata(idev);
+       dev_dbg(&idev->dev, "mimio_open\n");
+
+       if (mimio == NULL) {
+               dev_err(&idev->dev, "null mimio.\n");
+               rslt = -ENODEV;
+               goto exit;
+       }
+
+       if (mimio->open++)
+               goto exit;
+
+       if (mimio->present && !mimio->greeted) {
+               struct urb *urb = mimio->in.urb;
+               mimio->in.urb->dev = mimio->udev;
+               rslt = usb_submit_urb(mimio->in.urb, GFP_KERNEL);
+               if (rslt) {
+                       dev_err(&idev->dev, "usb_submit_urb failure "
+                               "(res = %d: %s). Not greeting.\n",
+                               rslt,
+                               (!urb ? "urb is NULL" :
+                                (urb->hcpriv ? "urb->hcpriv is non-NULL" :
+                                 (!urb->complete ? "urb is not complete" :
+                                  (urb->number_of_packets <= 0 ? "urb has no packets" :
+                                   (urb->interval <= 0 ? "urb interval too small" :
+                                    "urb interval too large or some other error"))))));
+                       rslt = -EIO;
+                       goto exit;
+               }
+               rslt = mimio_greet(mimio);
+               if (rslt == 0) {
+                       dev_dbg(&idev->dev, "Mimio greeted OK.\n");
+                       mimio->greeted = 1;
+               } else {
+                       dev_dbg(&idev->dev, "Mimio greet Failure (%d)\n",
+                               rslt);
+               }
+       }
+
+exit:
+       up(&disconnect_sem);
+       return rslt;
+}
+
+static int mimio_probe(struct usb_interface *ifc,
+                      const struct usb_device_id *id)
+{
+       char path[64];
+       int pipe, maxp;
+       struct mimio *mimio;
+       struct usb_device *udev;
+       struct usb_host_interface *hostifc;
+       struct input_dev *input_dev;
+       int res = 0;
+       int i;
+
+       udev = interface_to_usbdev(ifc);
+
+       mimio = kzalloc(sizeof(struct mimio), GFP_KERNEL);
+       if (!mimio)
+               return -ENOMEM;
+
+       input_dev = input_allocate_device();
+       if (!input_dev) {
+               mimio_dealloc(mimio);
+               return -ENOMEM;
+       }
+
+       mimio->uifc = ifc;
+       mimio->udev = udev;
+       mimio->pktbuf.p = mimio->pktbuf.buf;
+       mimio->pktbuf.q = mimio->pktbuf.buf;
+       /* init_input_dev(mimio->idev); */
+       mimio->idev = input_dev;
+       init_waitqueue_head(&mimio->waitq);
+       spin_lock_init(&mimio->txlock);
+       hostifc = ifc->cur_altsetting;
+
+       if (hostifc->desc.bNumEndpoints != 2) {
+               dev_err(&udev->dev, "Unexpected endpoint count: %d.\n",
+                       hostifc->desc.bNumEndpoints);
+               mimio_dealloc(mimio);
+               return -ENODEV;
+       }
+
+       mimio->in.desc = &(hostifc->endpoint[0].desc);
+       mimio->out.desc = &(hostifc->endpoint[1].desc);
+
+       mimio->in.buf = usb_buffer_alloc(udev, MIMIO_MAXPAYLOAD, GFP_KERNEL,
+                                        &mimio->in.dma);
+       mimio->out.buf = usb_buffer_alloc(udev, MIMIO_MAXPAYLOAD, GFP_KERNEL,
+                                         &mimio->out.dma);
+
+       if (mimio->in.buf == NULL || mimio->out.buf == NULL) {
+               dev_err(&udev->dev, "usb_buffer_alloc failure.\n");
+               mimio_dealloc(mimio);
+               return -ENOMEM;
+       }
+
+       mimio->in.urb = usb_alloc_urb(0, GFP_KERNEL);
+       mimio->out.urb = usb_alloc_urb(0, GFP_KERNEL);
+
+       if (mimio->in.urb == NULL || mimio->out.urb == NULL) {
+               dev_err(&udev->dev, "usb_alloc_urb failure.\n");
+               mimio_dealloc(mimio);
+               return -ENOMEM;
+       }
+
+       /*
+        * Build the input urb.
+        */
+       pipe = usb_rcvintpipe(udev, mimio->in.desc->bEndpointAddress);
+       maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+       if (maxp > MIMIO_MAXPAYLOAD)
+               maxp = MIMIO_MAXPAYLOAD;
+       usb_fill_int_urb(mimio->in.urb, udev, pipe, mimio->in.buf, maxp,
+                        mimio_irq_in, mimio, mimio->in.desc->bInterval);
+       mimio->in.urb->transfer_dma = mimio->in.dma;
+       mimio->in.urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+       /*
+        * Build the output urb.
+        */
+       pipe = usb_sndintpipe(udev, mimio->out.desc->bEndpointAddress);
+       maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+       if (maxp > MIMIO_MAXPAYLOAD)
+               maxp = MIMIO_MAXPAYLOAD;
+       usb_fill_int_urb(mimio->out.urb, udev, pipe, mimio->out.buf, maxp,
+                        mimio_irq_out, mimio, mimio->out.desc->bInterval);
+       mimio->out.urb->transfer_dma = mimio->out.dma;
+       mimio->out.urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+       /*
+        * Build input device info
+        */
+       usb_make_path(udev, path, 64);
+       snprintf(mimio->phys, MIMIO_MAXNAMELEN, "%s/input0", path);
+       input_set_drvdata(input_dev, mimio);
+       /* input_dev->dev = &ifc->dev; */
+       input_dev->open = mimio_open;
+       input_dev->close = mimio_close;
+       input_dev->name = mimio_name;
+       input_dev->phys = mimio->phys;
+       input_dev->dev.parent = &ifc->dev;
+
+       input_dev->id.bustype = BUS_USB;
+       input_dev->id.vendor = le16_to_cpu(udev->descriptor.idVendor);
+       input_dev->id.product = le16_to_cpu(udev->descriptor.idProduct);
+       input_dev->id.version = le16_to_cpu(udev->descriptor.bcdDevice);
+
+       input_dev->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS);
+       for (i = BTN_TOOL_PEN; i <= LOCALBTN_TOOL_EXTRA2; ++i)
+               set_bit(i, input_dev->keybit);
+
+       input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) |
+                                                BIT_MASK(BTN_1) |
+                                                BIT_MASK(BTN_2) |
+                                                BIT_MASK(BTN_3) |
+                                                BIT_MASK(BTN_4) |
+                                                BIT_MASK(BTN_5);
+       /*   input_dev->keybit[BTN_MOUSE] |= BIT(BTN_LEFT); */
+       input_dev->absbit[0] |= BIT_MASK(ABS_X) | BIT_MASK(ABS_Y);
+       input_set_abs_params(input_dev, ABS_X, 0, MIMIO_XRANGE_MAX, 0, 0);
+       input_set_abs_params(input_dev, ABS_Y, 0, MIMIO_YRANGE_MAX, 0, 0);
+       input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
+
+#if 0
+       input_dev->absmin[ABS_X] = 0;
+       input_dev->absmin[ABS_Y] = 0;
+       input_dev->absmax[ABS_X] = 9600;
+       input_dev->absmax[ABS_Y] = 4800;
+       input_dev->absfuzz[ABS_X] = 0;
+       input_dev->absfuzz[ABS_Y] = 0;
+       input_dev->absflat[ABS_X] = 0;
+       input_dev->absflat[ABS_Y] = 0;
+#endif
+
+#if 0
+       /* this will just reduce the precision */
+       input_dev->absfuzz[ABS_X] = 8; /* experimental; may need to change */
+       input_dev->absfuzz[ABS_Y] = 8; /* experimental; may need to change */
+#endif
+
+       /*
+        * Register the input device.
+        */
+       res = input_register_device(mimio->idev);
+       if (res) {
+               dev_err(&udev->dev, "input_register_device failure (%d)\n",
+                       res);
+               mimio_dealloc(mimio);
+               return -EIO;
+       }
+       dev_dbg(&mimio->idev->dev, "input: %s on %s (res = %d).\n",
+               input_dev->name, input_dev->phys, res);
+
+       usb_set_intfdata(ifc, mimio);
+       mimio->present = 1;
+
+       /*
+        * Submit the input urb to the usb subsystem.
+        */
+       mimio->in.urb->dev = mimio->udev;
+       res = usb_submit_urb(mimio->in.urb, GFP_KERNEL);
+       if (res) {
+               dev_err(&mimio->idev->dev, "usb_submit_urb failure (%d)\n",
+                       res);
+               mimio_dealloc(mimio);
+               return -EIO;
+       }
+
+       /*
+        * Attempt to greet the mimio after giving
+        * it some post-init settling time.
+        *
+        * note: sometimes this sleep interval isn't
+        * long enough to permit the device to re-init
+        * after a hot-swap; maybe need to bump it up.
+        *
+        * As it is, this probably breaks module unloading support!
+        */
+       msleep(1024);
+
+       res = mimio_greet(mimio);
+       if (res == 0) {
+               dev_dbg(&mimio->idev->dev, "Mimio greeted OK.\n");
+               mimio->greeted = 1;
+               mimio->rxhandler = mimio_rx_handler;
+       } else {
+               dev_dbg(&mimio->idev->dev, "Mimio greet Failure (%d)\n", res);
+       }
+
+       return 0;
+}
+
+static int handle_mimio_rx_penupdown(struct mimio *mimio,
+                                    int down,
+                                    const char *const instr[],
+                                    const int instr_ofst[])
+{
+       int penid, x;
+       if (mimio->pktbuf.q - mimio->pktbuf.p < (down ? 4 : 3))
+               return 1;               /* partial pkt */
+
+       if (down) {
+               x = *mimio->pktbuf.p ^ *(mimio->pktbuf.p + 1) ^
+                       *(mimio->pktbuf.p + 2);
+               if (x != *(mimio->pktbuf.p + 3)) {
+                       dev_dbg(&mimio->idev->dev, "EV_PEN%s: bad xsum.\n",
+                               down ? "DOWN":"UP");
+                       /* skip this event data */
+                       mimio->pktbuf.p += 4;
+                       /* decode any remaining events */
+                       return 0;
+               }
+               penid = mimio->pktbuf.instr = *(mimio->pktbuf.p + 2);
+               if (penid > MIMIO_PEN_MAX) {
+                       dev_dbg(&mimio->idev->dev,
+                               "Unmapped penID (not in [0, %d]): %d\n",
+                               MIMIO_PEN_MAX, (int)mimio->pktbuf.instr);
+                       penid = mimio->pktbuf.instr = 0;
+               }
+               mimio->last_pen_down = penid;
+       } else {
+               penid = mimio->last_pen_down;
+       }
+       dev_dbg(&mimio->idev->dev, "%s (id %d, code %d) %s.\n", instr[penid],
+               instr_ofst[penid], penid, down ? "down" : "up");
+
+       if (instr_ofst[penid] >= 0) {
+               int code = BTN_TOOL_PEN + instr_ofst[penid];
+               int value = down ? DOWNVALUE : UPVALUE;
+               if (code > KEY_MAX)
+                       dev_dbg(&mimio->idev->dev, "input_event will ignore "
+                               "-- code (%d) > KEY_MAX\n", code);
+               if (!test_bit(code, mimio->idev->keybit))
+                       dev_dbg(&mimio->idev->dev, "input_event will ignore "
+                               "-- bit for code (%d) not enabled\n", code);
+               if (!!test_bit(code, mimio->idev->key) == value)
+                       dev_dbg(&mimio->idev->dev, "input_event will ignore "
+                               "-- bit for code (%d) already set to %d\n",
+                               code, value);
+               if (value != DOWNVALUE) {
+                       /* input_regs(mimio->idev, regs); */
+                       input_report_key(mimio->idev, code, value);
+                       input_sync(mimio->idev);
+               } else {
+                       /* wait until we get some coordinates */
+               }
+       } else {
+               dev_dbg(&mimio->idev->dev, "penID offset[%d] == %d is < 0 "
+                       "- not sending\n", penid, instr_ofst[penid]);
+       }
+       mimio->pktbuf.p += down ? 4 : 3; /* 3 for up, 4 for down */
+       return 0;
+}
+
+/*
+ * Stay tuned for partial-packet excitement.
+ *
+ * This routine buffers data packets received from the mimio device
+ * in the mimio's data space.  This buffering is necessary because
+ * the mimio's in endpoint can serve us partial packets of data, and
+ * we want the driver to support the servicing of multiple mimios.
+ * Empirical evidence gathered so far suggests that the method of
+ * buffering packet data in the mimio's data space works.  Previous
+ * versions of this driver did not buffer packet data in each mimio's
+ * data-space, and were therefore not able to service multiple mimios.
+ * Note that since the caller of this routine is running in interrupt
+ * context, care needs to be taken to ensure that this routine does not
+ * become bloated, and it may be that another spinlock is needed in each
+ * mimio to guard the buffered packet data properly.
+ */
+static void mimio_rx_handler(struct mimio *mimio,
+                            unsigned char *data,
+                            unsigned int nbytes)
+{
+       struct device *dev = &mimio->idev->dev;
+       unsigned int x;
+       unsigned int y;
+       static const char * const instr[] = {
+               "?0",
+               "black pen", "blue pen", "green pen", "red pen",
+               "brown pen", "orange pen", "purple pen", "yellow pen",
+               "big eraser", "lil eraser",
+               "?11", "?12", "?13", "?14", "?15", "?16",
+               "mimio interactive", "interactive button1",
+               "interactive button2"
+       };
+
+       /* Mimio Interactive gives:
+        * down: [0x22 0x01 0x11 0x32 0x24]
+        * b1  : [0x22 0x01 0x12 0x31 0x24]
+        * b2  : [0x22 0x01 0x13 0x30 0x24]
+        */
+       static const int instr_ofst[] = {
+               -1,
+               0, 1, 2, 3,
+               9, 9, 9, 9,
+               4, 5,
+               -1, -1, -1, -1, -1, -1,
+               6, 7, 8,
+       };
+
+       memcpy(mimio->pktbuf.q, data, nbytes);
+       mimio->pktbuf.q += nbytes;
+
+       while (mimio->pktbuf.p < mimio->pktbuf.q) {
+               int t = *mimio->pktbuf.p;
+               switch (t) {
+               case MIMIO_EV_PENUP:
+               case MIMIO_EV_PENDOWN:
+                       if (handle_mimio_rx_penupdown(mimio,
+                                                     t == MIMIO_EV_PENDOWN,
+                                                     instr, instr_ofst))
+                               return; /* partial packet */
+                       break;
+
+               case MIMIO_EV_PENDATA:
+                       if (mimio->pktbuf.q - mimio->pktbuf.p < 6)
+                               /* partial pkt */
+                               return;
+                       x = *mimio->pktbuf.p ^ *(mimio->pktbuf.p + 1) ^
+                               *(mimio->pktbuf.p + 2) ^
+                               *(mimio->pktbuf.p + 3) ^
+                               *(mimio->pktbuf.p + 4);
+                       if (x != *(mimio->pktbuf.p + 5)) {
+                               dev_dbg(dev, "EV_PENDATA: bad xsum.\n");
+                               mimio->pktbuf.p += 6; /* skip this event data */
+                               break; /* decode any remaining events */
+                       }
+                       x = *(mimio->pktbuf.p + 1);
+                       x <<= 8;
+                       x |= *(mimio->pktbuf.p + 2);
+                       y = *(mimio->pktbuf.p + 3);
+                       y <<= 8;
+                       y |= *(mimio->pktbuf.p + 4);
+                       dev_dbg(dev, "coord: (%d, %d)\n", x, y);
+                       if (instr_ofst[mimio->pktbuf.instr] >= 0) {
+                               int code = BTN_TOOL_PEN +
+                                          instr_ofst[mimio->last_pen_down];
+#if 0
+                               /* Utter hack to ensure we get forwarded _AND_
+                                * so we can identify when a complete signal is
+                                * received */
+                               mimio->idev->abs[ABS_Y] = -1;
+                               mimio->idev->abs[ABS_X] = -1;
+#endif
+                               /* input_regs(mimio->idev, regs); */
+                               input_report_abs(mimio->idev, ABS_X, x);
+                               input_report_abs(mimio->idev, ABS_Y, y);
+                               /* fake a penup */
+                               change_bit(code, mimio->idev->key);
+                               input_report_key(mimio->idev,
+                                                code,
+                                                DOWNVALUE);
+                               /* always sync here */
+                               mimio->idev->sync = 0;
+                               input_sync(mimio->idev);
+                       }
+                       mimio->pktbuf.p += 6;
+                       break;
+               case MIMIO_EV_MEMRESET:
+                       if (mimio->pktbuf.q - mimio->pktbuf.p < 7)
+                               /* partial pkt */
+                               return;
+                       dev_dbg(dev, "mem-reset.\n");
+                       /* input_regs(mimio->idev, regs); */
+                       input_event(mimio->idev, EV_KEY, BTN_0, 1);
+                       input_event(mimio->idev, EV_KEY, BTN_0, 0);
+                       input_sync(mimio->idev);
+                       mimio->pktbuf.p += 7;
+                       break;
+               case MIMIO_EV_ACC:
+                       if (mimio->pktbuf.q - mimio->pktbuf.p < 4)
+                               /* partial pkt */
+                               return;
+                       x = *mimio->pktbuf.p ^ *(mimio->pktbuf.p + 1) ^
+                               *(mimio->pktbuf.p + 2);
+                       if (x != *(mimio->pktbuf.p + 3)) {
+                               dev_dbg(dev, "EV_ACC: bad xsum.\n");
+                               mimio->pktbuf.p += 4; /* skip this event data */
+                               break; /* decode any remaining events */
+                       }
+                       switch (*(mimio->pktbuf.p + 2)) {
+                       case ACC_NEWPAGE:
+                               dev_dbg(&mimio->idev->dev, "new-page.\n");
+                               /* input_regs(mimio->idev, regs); */
+                               input_event(mimio->idev, EV_KEY, BTN_1, 1);
+                               input_event(mimio->idev, EV_KEY, BTN_1, 0);
+                               input_sync(mimio->idev);
+                               break;
+                       case ACC_TAGPAGE:
+                               dev_dbg(&mimio->idev->dev, "tag-page.\n");
+                               /* input_regs(mimio->idev, regs); */
+                               input_event(mimio->idev, EV_KEY, BTN_2, 1);
+                               input_event(mimio->idev, EV_KEY, BTN_2, 0);
+                               input_sync(mimio->idev);
+                               break;
+                       case ACC_PRINTPAGE:
+                               dev_dbg(&mimio->idev->dev, "print-page.\n");
+                               /* input_regs(mimio->idev, regs);*/
+                               input_event(mimio->idev, EV_KEY, BTN_3, 1);
+                               input_event(mimio->idev, EV_KEY, BTN_3, 0);
+                               input_sync(mimio->idev);
+                               break;
+                       case ACC_MAXIMIZE:
+                               dev_dbg(&mimio->idev->dev,
+                                       "maximize-window.\n");
+                               /* input_regs(mimio->idev, regs); */
+                               input_event(mimio->idev, EV_KEY, BTN_4, 1);
+                               input_event(mimio->idev, EV_KEY, BTN_4, 0);
+                               input_sync(mimio->idev);
+                               break;
+                       case ACC_FINDCTLPNL:
+                               dev_dbg(&mimio->idev->dev, "find-ctl-panel.\n");
+                               /* input_regs(mimio->idev, regs); */
+                               input_event(mimio->idev, EV_KEY, BTN_5, 1);
+                               input_event(mimio->idev, EV_KEY, BTN_5, 0);
+                               input_sync(mimio->idev);
+                               break;
+                       case ACC_DONE:
+                               dev_dbg(&mimio->idev->dev, "acc-done.\n");
+                               /* no event is dispatched to the input
+                                * subsystem for this device event.
+                                */
+                               break;
+                       default:
+                               dev_dbg(dev, "unknown acc event.\n");
+                               break;
+                       }
+                       mimio->pktbuf.p += 4;
+                       break;
+               default:
+                       mimio->pktbuf.p++;
+                       break;
+               }
+       }
+
+       /*
+        * No partial event was received, so reset mimio's pktbuf ptrs.
+        */
+       mimio->pktbuf.p = mimio->pktbuf.q = mimio->pktbuf.buf;
+}
+
+static int mimio_tx(struct mimio *mimio, const char *buf, int nbytes)
+{
+       int rslt;
+       int timeout;
+       unsigned long flags;
+       DECLARE_WAITQUEUE(wait, current);
+
+       if (!(isvalidtxsize(nbytes))) {
+               dev_err(&mimio->idev->dev, "invalid arg: nbytes: %d.\n",
+                       nbytes);
+               return -EINVAL;
+       }
+
+       /*
+        * Init the out urb and copy the data to send.
+        */
+       mimio->out.urb->dev = mimio->udev;
+       mimio->out.urb->transfer_buffer_length = nbytes;
+       memcpy(mimio->out.urb->transfer_buffer, buf, nbytes);
+
+       /*
+        * Send the data.
+        */
+       spin_lock_irqsave(&mimio->txlock, flags);
+       mimio->txflags = MIMIO_TXWAIT;
+       rslt = usb_submit_urb(mimio->out.urb, GFP_ATOMIC);
+       spin_unlock_irqrestore(&mimio->txlock, flags);
+       dev_dbg(&mimio->idev->dev, "rslt: %d.\n", rslt);
+
+       if (rslt) {
+               dev_err(&mimio->idev->dev, "usb_submit_urb failure: %d.\n",
+                       rslt);
+               return rslt;
+       }
+
+       /*
+        * Wait for completion to be signalled (the mimio_irq_out
+        * completion routine will or MIMIO_TXDONE in with txflags).
+        */
+       timeout = HZ;
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&mimio->waitq, &wait);
+
+       while (timeout && ((mimio->txflags & MIMIO_TXDONE) == 0)) {
+               timeout = schedule_timeout(timeout);
+               rmb();
+       }
+
+       if ((mimio->txflags & MIMIO_TXDONE) == 0)
+               dev_dbg(&mimio->idev->dev, "tx timed out.\n");
+
+       /*
+        * Now that completion has been signalled,
+        * unlink the urb so that it can be recycled.
+        */
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(&mimio->waitq, &wait);
+       usb_unlink_urb(mimio->out.urb);
+
+       return rslt;
+}
+
+static int __init mimio_init(void)
+{
+       int rslt;
+
+       rslt = usb_register(&mimio_driver);
+       if (rslt != 0) {
+               err("%s: usb_register failure: %d", __func__, rslt);
+               return rslt;
+       }
+
+       printk(KERN_INFO KBUILD_MODNAME ":"
+              DRIVER_DESC " " DRIVER_VERSION "\n");
+       return rslt;
+}
+
+static void __exit mimio_exit(void)
+{
+       usb_deregister(&mimio_driver);
+}
+
+module_init(mimio_init);
+module_exit(mimio_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
index c39a25f500ef9d011436e444cf7d397f8cb7d252..4ce399b6d237546961b826734ca8c77003915eb4 100644 (file)
@@ -2181,7 +2181,6 @@ int panel_init(void)
                if (pprt) {
                        parport_release(pprt);
                        parport_unregister_device(pprt);
-                       pprt = NULL;
                }
                parport_unregister_driver(&panel_driver);
                printk(KERN_ERR "Panel driver version " PANEL_VERSION
@@ -2231,7 +2230,6 @@ static void __exit panel_cleanup_module(void)
                /* TODO: free all input signals */
                parport_release(pprt);
                parport_unregister_device(pprt);
-               pprt = NULL;
        }
        parport_unregister_driver(&panel_driver);
 }
index 98b0f8e726fa07fbefa6d42db58d4a4b0355c00c..0bc0fb99d2e40c0692768a3213e2f781405c9e84 100644 (file)
@@ -716,7 +716,7 @@ VOID RTMPFreeTxRxRingMemory(
        {
                if ((pAd->RxRing.Cell[index].DmaBuf.AllocVa) && (pAd->RxRing.Cell[index].pNdisPacket))
                {
-                       PCI_UNMAP_SINGLE(pAd, pAd->RxRing.Cell[index].DmaBuf.AllocPa, pAd->RxRing.Cell[index].DmaBuf.AllocSize, PCI_DMA_FROMDEVICE);
+                       PCI_UNMAP_SINGLE(pObj->pci_dev, pAd->RxRing.Cell[index].DmaBuf.AllocPa, pAd->RxRing.Cell[index].DmaBuf.AllocSize, PCI_DMA_FROMDEVICE);
                        RELEASE_NDIS_PACKET(pAd, pAd->RxRing.Cell[index].pNdisPacket, NDIS_STATUS_SUCCESS);
                }
        }
index c30773b5c59e715c9f4714a88339c0fe4db5bc47..cd07059b25b51dc29da0651b769f5e57f743e803 100644 (file)
@@ -356,12 +356,8 @@ HwHSSIThreeWire(
                        }
                        udelay(10);
                }
-               if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) {
-                       printk(KERN_ERR "rtl8187se: HwThreeWire(): CmdReg:"
-                              " %#X RE|WE bits are not clear!!\n", u1bTmp);
-                       dump_stack();
-                       return 0;
-               }
+               if (TryCnt == TC_3W_POLL_MAX_TRY_CNT)
+                       panic("HwThreeWire(): CmdReg: %#X RE|WE bits are not clear!!\n", u1bTmp);
 
                // RTL8187S HSSI Read/Write Function
                u1bTmp = read_nic_byte(dev, RF_SW_CONFIG);
@@ -401,23 +397,13 @@ HwHSSIThreeWire(
                                int idx;
                                int ByteCnt = nDataBufBitCnt / 8;
                                 //printk("%d\n",nDataBufBitCnt);
-                               if ((nDataBufBitCnt % 8) != 0) {
-                                       printk(KERN_ERR "rtl8187se: "
-                                              "HwThreeWire(): nDataBufBitCnt(%d)"
-                                              " should be multiple of 8!!!\n",
-                                              nDataBufBitCnt);
-                                       dump_stack();
-                                       nDataBufBitCnt += 8;
-                                       nDataBufBitCnt &= ~7;
-                               }
+                               if ((nDataBufBitCnt % 8) != 0)
+                               panic("HwThreeWire(): nDataBufBitCnt(%d) should be multiple of 8!!!\n",
+                               nDataBufBitCnt);
 
-                              if (nDataBufBitCnt > 64) {
-                                       printk(KERN_ERR "rtl8187se: HwThreeWire():"
-                                              " nDataBufBitCnt(%d) should <= 64!!!\n",
-                                              nDataBufBitCnt);
-                                       dump_stack();
-                                       nDataBufBitCnt = 64;
-                               }
+                              if (nDataBufBitCnt > 64)
+                               panic("HwThreeWire(): nDataBufBitCnt(%d) should <= 64!!!\n",
+                               nDataBufBitCnt);
 
                                for(idx = 0; idx < ByteCnt; idx++)
                                {
index 6d52d6adbb41f4878a9a6501dc04753eda145b8f..66274d7666ff04de3b7e6215196d377e740f1b9d 100644 (file)
@@ -112,25 +112,20 @@ u32 rt_global_debug_component = \
 
 static struct usb_device_id rtl8192_usb_id_tbl[] = {
        /* Realtek */
-       {USB_DEVICE(0x0bda, 0x8171)},
        {USB_DEVICE(0x0bda, 0x8192)},
        {USB_DEVICE(0x0bda, 0x8709)},
        /* Corega */
        {USB_DEVICE(0x07aa, 0x0043)},
        /* Belkin */
        {USB_DEVICE(0x050d, 0x805E)},
-       {USB_DEVICE(0x050d, 0x815F)}, /* Belkin F5D8053 v6 */
        /* Sitecom */
        {USB_DEVICE(0x0df6, 0x0031)},
-       {USB_DEVICE(0x0df6, 0x004b)},   /* WL-349 */
        /* EnGenius */
        {USB_DEVICE(0x1740, 0x9201)},
        /* Dlink */
        {USB_DEVICE(0x2001, 0x3301)},
        /* Zinwell */
        {USB_DEVICE(0x5a57, 0x0290)},
-       /* Guillemot */
-       {USB_DEVICE(0x06f8, 0xe031)},
        //92SU
        {USB_DEVICE(0x0bda, 0x8172)},
        {}
index af3832b03e4b95cb12394ea0ed6b5232d091c9ac..6da1021e8a65edd1d9baad721ae3d50bc58f78da 100644 (file)
@@ -38,13 +38,21 @@ static int event_handler(struct usbip_device *ud)
                        ud->eh_ops.shutdown(ud);
 
                        ud->event &= ~USBIP_EH_SHUTDOWN;
+
+                       break;
                }
 
+               /* Stop the error handler. */
+               if (ud->event & USBIP_EH_BYE)
+                       return -1;
+
                /* Reset the device. */
                if (ud->event & USBIP_EH_RESET) {
                        ud->eh_ops.reset(ud);
 
                        ud->event &= ~USBIP_EH_RESET;
+
+                       break;
                }
 
                /* Mark the device as unusable. */
@@ -52,11 +60,13 @@ static int event_handler(struct usbip_device *ud)
                        ud->eh_ops.unusable(ud);
 
                        ud->event &= ~USBIP_EH_UNUSABLE;
+
+                       break;
                }
 
-               /* Stop the error handler. */
-               if (ud->event & USBIP_EH_BYE)
-                       return -1;
+               /* NOTREACHED */
+               printk(KERN_ERR "%s: unknown event\n", __func__);
+               return -1;
        }
 
        return 0;
@@ -107,9 +117,6 @@ void usbip_stop_eh(struct usbip_device *ud)
 {
        struct usbip_task *eh = &ud->eh;
 
-       if (eh->thread == current)
-               return; /* do not wait for myself */
-
        wait_for_completion(&eh->thread_done);
        usbip_dbg_eh("usbip_eh has finished\n");
 }
index c2018029059c5ce35b17a1965b443f586c1ceec7..6e91fc2bd850a2bf9c3e2f11f40fb935b93df873 100644 (file)
@@ -163,8 +163,6 @@ void rh_port_disconnect(int rhport)
         * spin_unlock(&vdev->ud.lock); */
 
        spin_unlock_irqrestore(&the_controller->lock, flags);
-
-       usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
 }
 
 
index 269d1e2382b75d050e5bb17c89d09af97ac9a7fd..53450b48eaa6e615ae4626f7aa4192003bbc3447 100644 (file)
@@ -1089,13 +1089,11 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
     }
 //2008-07-21-01<Add>by MikeLiu
 //register wpadev
-#if 0
    if(wpa_set_wpadev(pDevice, 1)!=0) {
      printk("Fail to Register WPADEV?\n");
         unregister_netdev(pDevice->dev);
         free_netdev(dev);
    }
-#endif
     device_print_info(pDevice);
     pci_set_drvdata(pcid, pDevice);
     return 0;
index a078f6f50d7046ac1aa89f1026931055b1ec5a33..574e0b0a9c28387e2b6e1181c8ef0b8ab8c0a1ca 100644 (file)
@@ -767,14 +767,9 @@ static int wpa_set_associate(PSDevice pDevice,
     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
 
 
-       if (param->u.wpa_associate.wpa_ie_len) {
-               if (!param->u.wpa_associate.wpa_ie)
-                       return -EINVAL;
-               if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
-                       return -EINVAL;
-               if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
-                       return -EFAULT;
-       }
+       if (param->u.wpa_associate.wpa_ie &&
+           copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
+           return -EINVAL;
 
        if (param->u.wpa_associate.mode == 1)
            pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
index ae2c0c06dd371bd9ebf6af87c2267c37a73c321b..d171b563e94c7e2bb10c092834799a434b6b8956 100644 (file)
@@ -2259,7 +2259,7 @@ out:
        return ret;
 }
 
-static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot);
+static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot);
 
 static ssize_t read_human_status(struct device *dev, struct device_attribute *attr,
                char *buf)
@@ -2322,7 +2322,7 @@ out:
        return ret;
 }
 
-static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL);
+static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, read_human_status, NULL);
 
 static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
                char *buf)
@@ -2354,7 +2354,7 @@ out:
        return ret;
 }
 
-static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL);
+static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL);
 
 #define UEA_ATTR(name, reset)                                  \
                                                                \
index e3017c46d5ec1db2427d9525e7041eccffbbb90c..e4eca7810bcf1bed6821086e4bab6130525e3c17 100644 (file)
@@ -170,7 +170,6 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
 {
        wb->use = 0;
        acm->transmitting--;
-       usb_autopm_put_interface_async(acm->control);
 }
 
 /*
@@ -212,12 +211,9 @@ static int acm_write_start(struct acm *acm, int wbn)
        }
 
        dbg("%s susp_count: %d", __func__, acm->susp_count);
-       usb_autopm_get_interface_async(acm->control);
        if (acm->susp_count) {
-               if (!acm->delayed_wb)
-                       acm->delayed_wb = wb;
-               else
-                       usb_autopm_put_interface_async(acm->control);
+               acm->delayed_wb = wb;
+               schedule_work(&acm->waker);
                spin_unlock_irqrestore(&acm->write_lock, flags);
                return 0;       /* A white lie */
        }
@@ -538,6 +534,23 @@ static void acm_softint(struct work_struct *work)
        tty_kref_put(tty);
 }
 
+static void acm_waker(struct work_struct *waker)
+{
+       struct acm *acm = container_of(waker, struct acm, waker);
+       int rv;
+
+       rv = usb_autopm_get_interface(acm->control);
+       if (rv < 0) {
+               dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
+               return;
+       }
+       if (acm->delayed_wb) {
+               acm_start_wb(acm, acm->delayed_wb);
+               acm->delayed_wb = NULL;
+       }
+       usb_autopm_put_interface(acm->control);
+}
+
 /*
  * TTY handlers
  */
@@ -971,8 +984,7 @@ static int acm_probe(struct usb_interface *intf,
        }
 
        if (!buflen) {
-               if (intf->cur_altsetting->endpoint &&
-                               intf->cur_altsetting->endpoint->extralen &&
+               if (intf->cur_altsetting->endpoint->extralen &&
                                intf->cur_altsetting->endpoint->extra) {
                        dev_dbg(&intf->dev,
                                "Seeking extra descriptors on endpoint\n");
@@ -1166,6 +1178,7 @@ made_compressed_probe:
        acm->urb_task.func = acm_rx_tasklet;
        acm->urb_task.data = (unsigned long) acm;
        INIT_WORK(&acm->work, acm_softint);
+       INIT_WORK(&acm->waker, acm_waker);
        init_waitqueue_head(&acm->drain_wait);
        spin_lock_init(&acm->throttle_lock);
        spin_lock_init(&acm->write_lock);
@@ -1202,7 +1215,7 @@ made_compressed_probe:
                if (rcv->urb == NULL) {
                        dev_dbg(&intf->dev,
                                "out of memory (read urbs usb_alloc_urb)\n");
-                       goto alloc_fail6;
+                       goto alloc_fail7;
                }
 
                rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -1226,7 +1239,7 @@ made_compressed_probe:
                if (snd->urb == NULL) {
                        dev_dbg(&intf->dev,
                                "out of memory (write urbs usb_alloc_urb)");
-                       goto alloc_fail8;
+                       goto alloc_fail7;
                }
 
                if (usb_endpoint_xfer_int(epwrite))
@@ -1265,7 +1278,6 @@ made_compressed_probe:
                i = device_create_file(&intf->dev,
                                                &dev_attr_iCountryCodeRelDate);
                if (i < 0) {
-                       device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
                        kfree(acm->country_codes);
                        goto skip_countries;
                }
@@ -1302,7 +1314,6 @@ alloc_fail8:
                usb_free_urb(acm->wb[i].urb);
 alloc_fail7:
        acm_read_buffers_free(acm);
-alloc_fail6:
        for (i = 0; i < num_rx_buf; i++)
                usb_free_urb(acm->ru[i].urb);
        usb_free_urb(acm->ctrlurb);
@@ -1332,6 +1343,7 @@ static void stop_data_traffic(struct acm *acm)
        tasklet_enable(&acm->urb_task);
 
        cancel_work_sync(&acm->work);
+       cancel_work_sync(&acm->waker);
 }
 
 static void acm_disconnect(struct usb_interface *intf)
@@ -1423,7 +1435,6 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
 static int acm_resume(struct usb_interface *intf)
 {
        struct acm *acm = usb_get_intfdata(intf);
-       struct acm_wb *wb;
        int rv = 0;
        int cnt;
 
@@ -1438,21 +1449,6 @@ static int acm_resume(struct usb_interface *intf)
        mutex_lock(&acm->mutex);
        if (acm->port.count) {
                rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
-
-               spin_lock_irq(&acm->write_lock);
-               if (acm->delayed_wb) {
-                       wb = acm->delayed_wb;
-                       acm->delayed_wb = NULL;
-                       spin_unlock_irq(&acm->write_lock);
-                       acm_start_wb(acm, wb);
-               } else {
-                       spin_unlock_irq(&acm->write_lock);
-               }
-
-               /*
-                * delayed error checking because we must
-                * do the write path at all cost
-                */
                if (rv < 0)
                        goto err_out;
 
@@ -1465,17 +1461,6 @@ err_out:
 }
 
 #endif /* CONFIG_PM */
-
-#define NOKIA_PCSUITE_ACM_INFO(x) \
-               USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
-               USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
-               USB_CDC_ACM_PROTO_VENDOR)
-
-#define SAMSUNG_PCSUITE_ACM_INFO(x) \
-               USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
-               USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
-               USB_CDC_ACM_PROTO_VENDOR)
-
 /*
  * USB driver structure.
  */
@@ -1533,76 +1518,6 @@ static struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
        .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
        },
-       { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
-       .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
-       },
-
-       /* Nokia S60 phones expose two ACM channels. The first is
-        * a modem and is picked up by the standard AT-command
-        * information below. The second is 'vendor-specific' but
-        * is treated as a serial device at the S60 end, so we want
-        * to expose it on Linux too. */
-       { NOKIA_PCSUITE_ACM_INFO(0x042D), }, /* Nokia 3250 */
-       { NOKIA_PCSUITE_ACM_INFO(0x04D8), }, /* Nokia 5500 Sport */
-       { NOKIA_PCSUITE_ACM_INFO(0x04C9), }, /* Nokia E50 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0419), }, /* Nokia E60 */
-       { NOKIA_PCSUITE_ACM_INFO(0x044D), }, /* Nokia E61 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0001), }, /* Nokia E61i */
-       { NOKIA_PCSUITE_ACM_INFO(0x0475), }, /* Nokia E62 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0508), }, /* Nokia E65 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0418), }, /* Nokia E70 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0425), }, /* Nokia N71 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0486), }, /* Nokia N73 */
-       { NOKIA_PCSUITE_ACM_INFO(0x04DF), }, /* Nokia N75 */
-       { NOKIA_PCSUITE_ACM_INFO(0x000e), }, /* Nokia N77 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0445), }, /* Nokia N80 */
-       { NOKIA_PCSUITE_ACM_INFO(0x042F), }, /* Nokia N91 & N91 8GB */
-       { NOKIA_PCSUITE_ACM_INFO(0x048E), }, /* Nokia N92 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0420), }, /* Nokia N93 */
-       { NOKIA_PCSUITE_ACM_INFO(0x04E6), }, /* Nokia N93i  */
-       { NOKIA_PCSUITE_ACM_INFO(0x04B2), }, /* Nokia 5700 XpressMusic */
-       { NOKIA_PCSUITE_ACM_INFO(0x0134), }, /* Nokia 6110 Navigator (China) */
-       { NOKIA_PCSUITE_ACM_INFO(0x046E), }, /* Nokia 6110 Navigator */
-       { NOKIA_PCSUITE_ACM_INFO(0x002f), }, /* Nokia 6120 classic &  */
-       { NOKIA_PCSUITE_ACM_INFO(0x0088), }, /* Nokia 6121 classic */
-       { NOKIA_PCSUITE_ACM_INFO(0x00fc), }, /* Nokia 6124 classic */
-       { NOKIA_PCSUITE_ACM_INFO(0x0042), }, /* Nokia E51 */
-       { NOKIA_PCSUITE_ACM_INFO(0x00b0), }, /* Nokia E66 */
-       { NOKIA_PCSUITE_ACM_INFO(0x00ab), }, /* Nokia E71 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0481), }, /* Nokia N76 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0007), }, /* Nokia N81 & N81 8GB */
-       { NOKIA_PCSUITE_ACM_INFO(0x0071), }, /* Nokia N82 */
-       { NOKIA_PCSUITE_ACM_INFO(0x04F0), }, /* Nokia N95 & N95-3 NAM */
-       { NOKIA_PCSUITE_ACM_INFO(0x0070), }, /* Nokia N95 8GB  */
-       { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
-       { NOKIA_PCSUITE_ACM_INFO(0x0099), }, /* Nokia 6210 Navigator, RM-367 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0128), }, /* Nokia 6210 Navigator, RM-419 */
-       { NOKIA_PCSUITE_ACM_INFO(0x008f), }, /* Nokia 6220 Classic */
-       { NOKIA_PCSUITE_ACM_INFO(0x00a0), }, /* Nokia 6650 */
-       { NOKIA_PCSUITE_ACM_INFO(0x007b), }, /* Nokia N78 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0094), }, /* Nokia N85 */
-       { NOKIA_PCSUITE_ACM_INFO(0x003a), }, /* Nokia N96 & N96-3  */
-       { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
-       { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
-       { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
-       { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
-       { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
-       { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
-       { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
-       { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
-       { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
-       { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
-       { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
-       { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
-       { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
-
-       /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
-
-       /* control interfaces without any protocol set */
-       { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
-               USB_CDC_PROTO_NONE) },
 
        /* control interfaces with various AT-command sets */
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
@@ -1618,6 +1533,7 @@ static struct usb_device_id acm_ids[] = {
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
                USB_CDC_ACM_PROTO_AT_CDMA) },
 
+       /* NOTE:  COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */
        { }
 };
 
index 519eb638b6e960a8325d65deff2f7829932050f6..c4a0ee8ffccfb3ae4203c8cac4d091d88177b478 100644 (file)
@@ -112,6 +112,7 @@ struct acm {
        struct mutex mutex;
        struct usb_cdc_line_coding line;                /* bits, stop, parity */
        struct work_struct work;                        /* work queue entry for line discipline waking up */
+       struct work_struct waker;
        wait_queue_head_t drain_wait;                   /* close processing */
        struct tasklet_struct urb_task;                 /* rx processing */
        spinlock_t throttle_lock;                       /* synchronize throtteling and read callback */
index 582aa87c4b0db444c16a376629c90adc6f77e704..24120db005a2c41503d8ae8175560c38d57a92fb 100644 (file)
@@ -946,11 +946,10 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
 
 static int proc_connectinfo(struct dev_state *ps, void __user *arg)
 {
-       struct usbdevfs_connectinfo ci = {
-               .devnum = ps->dev->devnum,
-               .slow = ps->dev->speed == USB_SPEED_LOW
-       };
+       struct usbdevfs_connectinfo ci;
 
+       ci.devnum = ps->dev->devnum;
+       ci.slow = ps->dev->speed == USB_SPEED_LOW;
        if (copy_to_user(arg, &ci, sizeof(ci)))
                return -EFAULT;
        return 0;
@@ -1177,13 +1176,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
                        free_async(as);
                        return -ENOMEM;
                }
-               /* Isochronous input data may end up being discontiguous
-                * if some of the packets are short.  Clear the buffer so
-                * that the gaps don't leak kernel data to userspace.
-                */
-               if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO)
-                       memset(as->urb->transfer_buffer, 0,
-                                       uurb->buffer_length);
        }
        as->urb->dev = ps->dev;
        as->urb->pipe = (uurb->type << 30) |
@@ -1320,14 +1312,10 @@ static int processcompl(struct async *as, void __user * __user *arg)
        void __user *addr = as->userurb;
        unsigned int i;
 
-       if (as->userbuffer && urb->actual_length) {
-               if (urb->number_of_packets > 0)         /* Isochronous */
-                       i = urb->transfer_buffer_length;
-               else                                    /* Non-Isoc */
-                       i = urb->actual_length;
-               if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
+       if (as->userbuffer && urb->actual_length)
+               if (copy_to_user(as->userbuffer, urb->transfer_buffer,
+                                urb->actual_length))
                        goto err_out;
-       }
        if (put_user(as->status, &userurb->status))
                goto err_out;
        if (put_user(urb->actual_length, &userurb->actual_length))
index d784a8b3a6bdb9d869ff2f20d63555bbfcb4f106..4f864472c5c4db4eea4004cd49e902cb4acd75c9 100644 (file)
@@ -625,6 +625,9 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
        struct usb_device *usb_dev;
 
+       /* driver is often null here; dev_dbg() would oops */
+       pr_debug("usb %s: uevent\n", dev_name(dev));
+
        if (is_usb_device(dev)) {
                usb_dev = to_usb_device(dev);
        } else if (is_usb_interface(dev)) {
@@ -636,7 +639,6 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
        }
 
        if (usb_dev->devnum < 0) {
-               /* driver is often null here; dev_dbg() would oops */
                pr_debug("usb %s: already deleted?\n", dev_name(dev));
                return -ENODEV;
        }
@@ -1175,8 +1177,9 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
                        udev->state == USB_STATE_SUSPENDED)
                goto done;
 
+       udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+
        if (msg.event & PM_EVENT_AUTO) {
-               udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
                status = autosuspend_check(udev, 0);
                if (status < 0)
                        goto done;
@@ -1741,23 +1744,6 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
        return status;
 }
 
-static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
-{
-       /* Remote wakeup is needed only when we actually go to sleep.
-        * For things like FREEZE and QUIESCE, if the device is already
-        * autosuspended then its current wakeup setting is okay.
-        */
-       if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
-               udev->do_remote_wakeup = 0;
-               return;
-       }
-
-       /* Allow remote wakeup if it is enabled, even if no interface drivers
-        * actually want it.
-        */
-       udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
-}
-
 int usb_suspend(struct device *dev, pm_message_t msg)
 {
        struct usb_device       *udev;
@@ -1777,7 +1763,6 @@ int usb_suspend(struct device *dev, pm_message_t msg)
        }
 
        udev->skip_sys_resume = 0;
-       choose_wakeup(udev, msg);
        return usb_external_suspend_device(udev, msg);
 }
 
index 8164ba53cab79f266462f8b8d3d655de2a31e470..222ee07ea680f303d9ce0d5b94daaeaa9aa22c1d 100644 (file)
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
 int usb_register_dev(struct usb_interface *intf,
                     struct usb_class_driver *class_driver)
 {
-       int retval;
+       int retval = -EINVAL;
        int minor_base = class_driver->minor_base;
-       int minor;
+       int minor = 0;
        char name[20];
        char *temp;
 
@@ -173,17 +173,12 @@ int usb_register_dev(struct usb_interface *intf,
         */
        minor_base = 0;
 #endif
+       intf->minor = -1;
 
-       if (class_driver->fops == NULL)
-               return -EINVAL;
-       if (intf->minor >= 0)
-               return -EADDRINUSE;
-
-       retval = init_usb_class();
-       if (retval)
-               return retval;
+       dbg ("looking for a minor, starting at %d", minor_base);
 
-       dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
+       if (class_driver->fops == NULL)
+               goto exit;
 
        down_write(&minor_rwsem);
        for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -191,12 +186,20 @@ int usb_register_dev(struct usb_interface *intf,
                        continue;
 
                usb_minors[minor] = class_driver->fops;
-               intf->minor = minor;
+
+               retval = 0;
                break;
        }
        up_write(&minor_rwsem);
-       if (intf->minor < 0)
-               return -EXFULL;
+
+       if (retval)
+               goto exit;
+
+       retval = init_usb_class();
+       if (retval)
+               goto exit;
+
+       intf->minor = minor;
 
        /* create a usb class device for this usb interface */
        snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -210,11 +213,11 @@ int usb_register_dev(struct usb_interface *intf,
                                      "%s", temp);
        if (IS_ERR(intf->usb_dev)) {
                down_write(&minor_rwsem);
-               usb_minors[minor] = NULL;
-               intf->minor = -1;
+               usb_minors[intf->minor] = NULL;
                up_write(&minor_rwsem);
                retval = PTR_ERR(intf->usb_dev);
        }
+exit:
        return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
index 1a78cd135aba22be68c6694a0d0ad2f1d655abdc..05e6d313961e3e3ce9ed3a09126f80ba65f7daff 100644 (file)
@@ -120,7 +120,7 @@ int usb_choose_configuration(struct usb_device *udev)
                 * than a vendor-specific driver. */
                else if (udev->descriptor.bDeviceClass !=
                                                USB_CLASS_VENDOR_SPEC &&
-                               (desc && desc->bInterfaceClass !=
+                               (!desc || desc->bInterfaceClass !=
                                                USB_CLASS_VENDOR_SPEC)) {
                        best = c;
                        break;
index 603e213f29b9c656770bd4b53e019509e95437f9..7b393ef73792f9dcefbff5de2b84781eb68d880c 100644 (file)
@@ -140,7 +140,7 @@ static const u8 usb3_rh_dev_descriptor[18] = {
        0x09,       /*  __u8  bMaxPacketSize0; 2^9 = 512 Bytes */
 
        0x6b, 0x1d, /*  __le16 idVendor; Linux Foundation */
-       0x03, 0x00, /*  __le16 idProduct; device 0x0003 */
+       0x02, 0x00, /*  __le16 idProduct; device 0x0002 */
        KERNEL_VER, KERNEL_REL, /*  __le16 bcdDevice */
 
        0x03,       /*  __u8  iManufacturer; */
index bcbe10476197b7e31bcdbe577518d58b4988bcea..79782a1c43f6098d98ac008bb5591b8607911ca1 100644 (file)
@@ -234,7 +234,7 @@ struct hc_driver {
        /* xHCI specific functions */
                /* Called by usb_alloc_dev to alloc HC device structures */
        int     (*alloc_dev)(struct usb_hcd *, struct usb_device *);
-               /* Called by usb_disconnect to free HC device structures */
+               /* Called by usb_release_dev to free HC device structures */
        void    (*free_dev)(struct usb_hcd *, struct usb_device *);
 
        /* Bandwidth computation functions */
index 7a631055ce64250aff1f007f2c27f7e770004b62..84042d80ecffc03dc6071ad146b2f81a9b422e75 100755 (executable)
@@ -22,7 +22,6 @@
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/freezer.h>
-#include <linux/usb/quirks.h>
 
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
@@ -1512,15 +1511,6 @@ static inline void usb_stop_pm(struct usb_device *udev)
 
 #endif
 
-static void hub_free_dev(struct usb_device *udev)
-{
-       struct usb_hcd *hcd = bus_to_hcd(udev->bus);
-
-       /* Root hubs aren't real devices, so don't free HCD resources */
-       if (hcd->driver->free_dev && udev->parent)
-               hcd->driver->free_dev(hcd, udev);
-}
-
 /**
  * usb_disconnect - disconnect a device (usbcore-internal)
  * @pdev: pointer to device being disconnected
@@ -1592,8 +1582,6 @@ void usb_disconnect(struct usb_device **pdev)
        usb_stop_pm(udev);
     
 
-       hub_free_dev(udev);
-
        put_device(&udev->dev);
 }
 
@@ -1773,6 +1761,7 @@ int usb_new_device(struct usb_device *udev)
        if (udev->parent)
                usb_autoresume_device(udev->parent);
 
+       usb_detect_quirks(udev);
        err = usb_enumerate_device(udev);       /* Read descriptors */
        if (err < 0)
                goto fail;
@@ -2825,16 +2814,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
        else
                i = udev->descriptor.bMaxPacketSize0;
        if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
-               if (udev->speed == USB_SPEED_LOW ||
+               if (udev->speed != USB_SPEED_FULL ||
                                !(i == 8 || i == 16 || i == 32 || i == 64)) {
-                       dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
+                       dev_err(&udev->dev, "ep0 maxpacket = %d\n", i);
                        retval = -EMSGSIZE;
                        goto fail;
                }
-               if (udev->speed == USB_SPEED_FULL)
-                       dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
-               else
-                       dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
+               dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
                udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
                usb_ep0_reinit(udev);
        }
@@ -3070,10 +3056,6 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
                if (status < 0)
                        goto loop;
 
-               usb_detect_quirks(udev);
-               if (udev->quirks & USB_QUIRK_DELAY_INIT)
-                       msleep(1000);
-
                /* consecutive bus-powered hubs aren't reliable; they can
                 * violate the voltage drop budget.  if the new child has
                 * a "powered" LED, users should notice we didn't enable it
@@ -3152,7 +3134,6 @@ loop_disable:
 loop:
                usb_ep0_reinit(udev);
                release_address(udev);
-               hub_free_dev(udev);
                usb_put_dev(udev);
                if ((status == -ENOTCONN) || (status == -ENOTSUPP))
                        break;
index 4a6366a42129196a112216771f727eca8b643da4..97b40ce133f0a8b7165a742d885dd91656843bc1 100644 (file)
@@ -515,13 +515,13 @@ static int fs_create_by_name (const char *name, mode_t mode,
        *dentry = NULL;
        mutex_lock(&parent->d_inode->i_mutex);
        *dentry = lookup_one_len(name, parent, strlen(name));
-       if (!IS_ERR(*dentry)) {
+       if (!IS_ERR(dentry)) {
                if ((mode & S_IFMT) == S_IFDIR)
                        error = usbfs_mkdir (parent->d_inode, *dentry, mode);
                else 
                        error = usbfs_create (parent->d_inode, *dentry, mode);
        } else
-               error = PTR_ERR(*dentry);
+               error = PTR_ERR(dentry);
        mutex_unlock(&parent->d_inode->i_mutex);
 
        return error;
index 409cc94a1331f0dfe0f42f420947b406bf65b1af..980a8d27fa50688c48585bfecb2cf849b21c0f34 100644 (file)
@@ -1185,6 +1185,13 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
 {
        int i;
 
+       dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
+               skip_ep0 ? "non-ep0" : "all");
+       for (i = skip_ep0; i < 16; ++i) {
+               usb_disable_endpoint(dev, i, true);
+               usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+       }
+
        /* getting rid of interfaces will disconnect
         * any drivers bound to them (a key side effect)
         */
@@ -1214,13 +1221,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
                if (dev->state == USB_STATE_CONFIGURED)
                        usb_set_device_state(dev, USB_STATE_ADDRESS);
        }
-
-       dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
-               skip_ep0 ? "non-ep0" : "all");
-       for (i = skip_ep0; i < 16; ++i) {
-               usb_disable_endpoint(dev, i, true);
-               usb_disable_endpoint(dev, i + USB_DIR_IN, true);
-       }
 }
 
 /**
@@ -1792,7 +1792,6 @@ free_interfaces:
                intf->dev.groups = usb_interface_groups;
                intf->dev.dma_mask = dev->dev.dma_mask;
                INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
-               intf->minor = -1;
                device_initialize(&intf->dev);
                mark_quiesced(intf);
                dev_set_name(&intf->dev, "%d-%s:%d.%d",
index 80b062b28ce57720bcfd16ae73023332822b1e4c..ab93918d92076dedec3575b49e5eaacfb2c32e71 100644 (file)
@@ -38,16 +38,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
-       /* Logitech Harmony 700-series */
-       { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
-
        /* Philips PSC805 audio device */
        { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
 
-       /* Artisman Watchdog Dongle */
-       { USB_DEVICE(0x04b4, 0x0526), .driver_info =
-                       USB_QUIRK_CONFIG_INTF_STRINGS },
-
        /* Roland SC-8820 */
        { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -71,9 +64,6 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
        { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
 
-       /* Broadcom BCM92035DGROM BT dongle */
-       { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
-
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index da9a2b83567092c52c1123a300e0b16a713e3929..0885d4abdc6265d0b7775da9b9f5503fc2be0dff 100644 (file)
@@ -137,16 +137,6 @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
 }
 EXPORT_SYMBOL_GPL(usb_anchor_urb);
 
-/* Callers must hold anchor->lock */
-static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
-{
-       urb->anchor = NULL;
-       list_del(&urb->anchor_list);
-       usb_put_urb(urb);
-       if (list_empty(&anchor->urb_list))
-               wake_up(&anchor->wait);
-}
-
 /**
  * usb_unanchor_urb - unanchors an URB
  * @urb: pointer to the urb to anchor
@@ -166,14 +156,17 @@ void usb_unanchor_urb(struct urb *urb)
                return;
 
        spin_lock_irqsave(&anchor->lock, flags);
-       /*
-        * At this point, we could be competing with another thread which
-        * has the same intention. To protect the urb from being unanchored
-        * twice, only the winner of the race gets the job.
-        */
-       if (likely(anchor == urb->anchor))
-               __usb_unanchor_urb(urb, anchor);
+       if (unlikely(anchor != urb->anchor)) {
+               /* we've lost the race to another thread */
+               spin_unlock_irqrestore(&anchor->lock, flags);
+               return;
+       }
+       urb->anchor = NULL;
+       list_del(&urb->anchor_list);
        spin_unlock_irqrestore(&anchor->lock, flags);
+       usb_put_urb(urb);
+       if (list_empty(&anchor->urb_list))
+               wake_up(&anchor->wait);
 }
 EXPORT_SYMBOL_GPL(usb_unanchor_urb);
 
@@ -732,11 +725,20 @@ EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
 void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
 {
        struct urb *victim;
+       unsigned long flags;
 
-       while ((victim = usb_get_from_anchor(anchor)) != NULL) {
+       spin_lock_irqsave(&anchor->lock, flags);
+       while (!list_empty(&anchor->urb_list)) {
+               victim = list_entry(anchor->urb_list.prev, struct urb,
+                                   anchor_list);
+               usb_get_urb(victim);
+               spin_unlock_irqrestore(&anchor->lock, flags);
+               /* this will unanchor the URB */
                usb_unlink_urb(victim);
                usb_put_urb(victim);
+               spin_lock_irqsave(&anchor->lock, flags);
        }
+       spin_unlock_irqrestore(&anchor->lock, flags);
 }
 EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
 
@@ -773,11 +775,12 @@ struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
                victim = list_entry(anchor->urb_list.next, struct urb,
                                    anchor_list);
                usb_get_urb(victim);
-               __usb_unanchor_urb(victim, anchor);
+               spin_unlock_irqrestore(&anchor->lock, flags);
+               usb_unanchor_urb(victim);
        } else {
+               spin_unlock_irqrestore(&anchor->lock, flags);
                victim = NULL;
        }
-       spin_unlock_irqrestore(&anchor->lock, flags);
 
        return victim;
 }
@@ -799,7 +802,12 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
-               __usb_unanchor_urb(victim, anchor);
+               usb_get_urb(victim);
+               spin_unlock_irqrestore(&anchor->lock, flags);
+               /* this may free the URB */
+               usb_unanchor_urb(victim);
+               usb_put_urb(victim);
+               spin_lock_irqsave(&anchor->lock, flags);
        }
        spin_unlock_irqrestore(&anchor->lock, flags);
 }
index b2379383ba13e8346593d15312fbac2f40b50681..20bb9e140febcc23eef24729f777b4d23809507f 100755 (executable)
@@ -191,6 +191,9 @@ static void usb_release_dev(struct device *dev)
        hcd = bus_to_hcd(udev->bus);
 
        usb_destroy_configuration(udev);
+       /* Root hubs aren't real devices, so don't free HCD resources */
+       if (hcd->driver->free_dev && udev->parent)
+               hcd->driver->free_dev(hcd, udev);
        usb_put_hcd(hcd);
        kfree(udev->product);
        kfree(udev->manufacturer);
index 946cbcfbcfbf6173a124e4c1676c6b245edf55ad..4e970cf0e29ae364b34c5e7436906e95db313952 100644 (file)
@@ -2013,9 +2013,6 @@ static int __init usba_udc_probe(struct platform_device *pdev)
                        } else {
                                disable_irq(gpio_to_irq(udc->vbus_pin));
                        }
-               } else {
-                       /* gpio_request fail so use -EINVAL for gpio_is_valid */
-                       udc->vbus_pin = -EINVAL;
                }
        }
 
index 08a9a62a39e34910ecbcb3fdb8381c044fa851e4..fa3d142ba64d966aa4bf454a4f2a55480970bc4f 100644 (file)
@@ -489,7 +489,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
        case USB_ENDPOINT_XFER_ISOC:
                /* Calculate transactions needed for high bandwidth iso */
                mult = (unsigned char)(1 + ((max >> 11) & 0x03));
-               max = max & 0x7ff;      /* bit 0~10 */
+               max = max & 0x8ff;      /* bit 0~10 */
                /* 3 transactions at most */
                if (mult > 3)
                        goto en_done;
index 33ac6acbdb780f0886a546fc3b31ed5dd9ab31ef..48267bc0b2e00c151bf05e8ed6caa2662e1de9a5 100644 (file)
@@ -291,13 +291,9 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
        /* mandatory */
        case OID_GEN_VENDOR_DESCRIPTION:
                pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
-               if ( rndis_per_dev_params [configNr].vendorDescr ) {
-                       length = strlen (rndis_per_dev_params [configNr].vendorDescr);
-                       memcpy (outbuf,
-                               rndis_per_dev_params [configNr].vendorDescr, length);
-               } else {
-                       outbuf[0] = 0;
-               }
+               length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+               memcpy (outbuf,
+                       rndis_per_dev_params [configNr].vendorDescr, length);
                retval = 0;
                break;
 
index 9e5f9f1e47a2e8023576a20649a203bf9a8cc327..adf8260c3a6aeff9f295e4ad5bf45cdfcb23233c 100644 (file)
@@ -535,11 +535,17 @@ recycle:
                list_move(&req->list, &port->read_pool);
        }
 
-       /* Push from tty to ldisc; without low_latency set this is handled by
-        * a workqueue, so we won't get callbacks and can hold port_lock
+       /* Push from tty to ldisc; this is immediate with low_latency, and
+        * may trigger callbacks to this driver ... so drop the spinlock.
         */
        if (tty && do_push) {
+               spin_unlock_irq(&port->port_lock);
                tty_flip_buffer_push(tty);
+               wake_up_interruptible(&tty->read_wait);
+               spin_lock_irq(&port->port_lock);
+
+               /* tty may have been closed */
+               tty = port->port_tty;
        }
 
 
@@ -777,6 +783,11 @@ static int gs_open(struct tty_struct *tty, struct file *file)
        port->open_count = 1;
        port->openclose = false;
 
+       /* low_latency means ldiscs work in tasklet context, without
+        * needing a workqueue schedule ... easier to keep up.
+        */
+       tty->low_latency = 1;
+
        /* if connected, start the I/O stream */
        if (port->port_usb) {
                struct gserial  *gser = port->port_usb;
@@ -1183,7 +1194,6 @@ void gserial_cleanup(void)
        n_ports = 0;
 
        tty_unregister_driver(gs_tty_driver);
-       put_tty_driver(gs_tty_driver);
        gs_tty_driver = NULL;
 
        pr_debug("%s: cleaned up ttyGS* support\n", __func__);
index 8198fc0e4ac6b6592585bb17addd2698900d1535..e18c6773809f47f740988f96d66634a586d24cdb 100644 (file)
@@ -543,7 +543,6 @@ static int ehci_init(struct usb_hcd *hcd)
         */
        ehci->periodic_size = DEFAULT_I_TDPS;
        INIT_LIST_HEAD(&ehci->cached_itd_list);
-       INIT_LIST_HEAD(&ehci->cached_sitd_list);
        if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
                return retval;
 
@@ -994,7 +993,7 @@ rescan:
        /* endpoints can be iso streams.  for now, we don't
         * accelerate iso completions ... so spin a while.
         */
-       if (qh->hw == NULL) {
+       if (qh->hw->hw_info1 == 0) {
                ehci_vdbg (ehci, "iso delay\n");
                goto idle_timeout;
        }
@@ -1008,11 +1007,10 @@ rescan:
                                tmp && tmp != qh;
                                tmp = tmp->qh_next.qh)
                        continue;
-               /* periodic qh self-unlinks on empty, and a COMPLETING qh
-                * may already be unlinked.
-                */
-               if (tmp)
-                       unlink_async(ehci, qh);
+               /* periodic qh self-unlinks on empty */
+               if (!tmp)
+                       goto nogood;
+               unlink_async (ehci, qh);
                /* FALL THROUGH */
        case QH_STATE_UNLINK:           /* wait for hw to finish? */
        case QH_STATE_UNLINK_WAIT:
@@ -1029,6 +1027,7 @@ idle_timeout:
                }
                /* else FALL THROUGH */
        default:
+nogood:
                /* caller was supposed to have unlinked any requests;
                 * that's not our job.  just leak this memory.
                 */
index 6ac3976ef3b2a2be6b0d4d44f50cd5ecec052ff8..698f46135d5e8cd690eeff5176a70bf8554e09e2 100644 (file)
@@ -292,16 +292,6 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
        /* manually resume the ports we suspended during bus_suspend() */
        i = HCS_N_PORTS (ehci->hcs_params);
        while (i--) {
-               /* clear phy low power mode before resume */
-               if (ehci->has_hostpc) {
-                       u32 __iomem     *hostpc_reg =
-                               (u32 __iomem *)((u8 *)ehci->regs
-                               + HOSTPC0 + 4 * (i & 0xff));
-                       temp = ehci_readl(ehci, hostpc_reg);
-                       ehci_writel(ehci, temp & ~HOSTPC_PHCD,
-                               hostpc_reg);
-                       mdelay(5);
-               }
                temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
                temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
                if (test_bit(i, &ehci->bus_suspended) &&
@@ -686,13 +676,6 @@ static int ehci_hub_control (
                        if (temp & PORT_SUSPEND) {
                                if ((temp & PORT_PE) == 0)
                                        goto error;
-                               /* clear phy low power mode before resume */
-                               if (hostpc_reg) {
-                                       temp1 = ehci_readl(ehci, hostpc_reg);
-                                       ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
-                                               hostpc_reg);
-                                       mdelay(5);
-                               }
                                /* resume signaling for 20 msec */
                                temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
                                ehci_writel(ehci, temp | PORT_RESUME,
index 1f3f01eacaf09cecb88af40187910465cb49b0f3..aeda96e0af67bd6eb43b3b607e2c7a3817da8c5d 100644 (file)
@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh)
 
 static void ehci_mem_cleanup (struct ehci_hcd *ehci)
 {
-       free_cached_lists(ehci);
+       free_cached_itd_list(ehci);
        if (ehci->async)
                qh_put (ehci->async);
        ehci->async = NULL;
index ab26c2be366aa8836c7f7bee692df25cf995fd47..36f96da129f5c637208031ec04913be3f0cc6e67 100644 (file)
@@ -192,19 +192,17 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
        }
 
        rv = usb_add_hcd(hcd, irq, 0);
-       if (rv)
-               goto err_ehci;
-
-       return 0;
+       if (rv == 0)
+               return 0;
 
-err_ehci:
-       if (ehci->has_amcc_usb23)
-               iounmap(ehci->ohci_hcctrl_reg);
        iounmap(hcd->regs);
 err_ioremap:
        irq_dispose_mapping(irq);
 err_irq:
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+       if (ehci->has_amcc_usb23)
+               iounmap(ehci->ohci_hcctrl_reg);
 err_rmr:
        usb_put_hcd(hcd);
 
index 6746a8a794d40a2d3030ecd3469a9081ad586e18..a5535b5e3fe27860caebb3d50f228ea6d85fd1b8 100644 (file)
@@ -1121,8 +1121,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
                                        urb->interval);
                }
 
-       /* if dev->ep [epnum] is a QH, hw is set */
-       } else if (unlikely (stream->hw != NULL)) {
+       /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
+       } else if (unlikely (stream->hw_info1 != 0)) {
                ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
                        urb->dev->devpath, epnum,
                        usb_pipein(urb->pipe) ? "in" : "out");
@@ -1553,27 +1553,13 @@ itd_patch(
 static inline void
 itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
 {
-       union ehci_shadow       *prev = &ehci->pshadow[frame];
-       __hc32                  *hw_p = &ehci->periodic[frame];
-       union ehci_shadow       here = *prev;
-       __hc32                  type = 0;
-
-       /* skip any iso nodes which might belong to previous microframes */
-       while (here.ptr) {
-               type = Q_NEXT_TYPE(ehci, *hw_p);
-               if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
-                       break;
-               prev = periodic_next_shadow(ehci, prev, type);
-               hw_p = shadow_next_periodic(ehci, &here, type);
-               here = *prev;
-       }
-
-       itd->itd_next = here;
-       itd->hw_next = *hw_p;
-       prev->itd = itd;
+       /* always prepend ITD/SITD ... only QH tree is order-sensitive */
+       itd->itd_next = ehci->pshadow [frame];
+       itd->hw_next = ehci->periodic [frame];
+       ehci->pshadow [frame].itd = itd;
        itd->frame = frame;
        wmb ();
-       *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
+       ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
 }
 
 /* fit urb's itds into the selected schedule slot; activate as needed */
@@ -2127,27 +2113,13 @@ sitd_complete (
                        (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
        }
        iso_stream_put (ehci, stream);
-
+       /* OK to recycle this SITD now that its completion callback ran. */
 done:
        sitd->urb = NULL;
-       if (ehci->clock_frame != sitd->frame) {
-               /* OK to recycle this SITD now. */
-               sitd->stream = NULL;
-               list_move(&sitd->sitd_list, &stream->free_list);
-               iso_stream_put(ehci, stream);
-       } else {
-               /* HW might remember this SITD, so we can't recycle it yet.
-                * Move it to a safe place until a new frame starts.
-                */
-               list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
-               if (stream->refcount == 2) {
-                       /* If iso_stream_put() were called here, stream
-                        * would be freed.  Instead, just prevent reuse.
-                        */
-                       stream->ep->hcpriv = NULL;
-                       stream->ep = NULL;
-               }
-       }
+       sitd->stream = NULL;
+       list_move(&sitd->sitd_list, &stream->free_list);
+       iso_stream_put(ehci, stream);
+
        return retval;
 }
 
@@ -2213,10 +2185,9 @@ done:
 
 /*-------------------------------------------------------------------------*/
 
-static void free_cached_lists(struct ehci_hcd *ehci)
+static void free_cached_itd_list(struct ehci_hcd *ehci)
 {
        struct ehci_itd *itd, *n;
-       struct ehci_sitd *sitd, *sn;
 
        list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
                struct ehci_iso_stream  *stream = itd->stream;
@@ -2224,13 +2195,6 @@ static void free_cached_lists(struct ehci_hcd *ehci)
                list_move(&itd->itd_list, &stream->free_list);
                iso_stream_put(ehci, stream);
        }
-
-       list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
-               struct ehci_iso_stream  *stream = sitd->stream;
-               sitd->stream = NULL;
-               list_move(&sitd->sitd_list, &stream->free_list);
-               iso_stream_put(ehci, stream);
-       }
 }
 
 /*-------------------------------------------------------------------------*/
@@ -2257,7 +2221,7 @@ scan_periodic (struct ehci_hcd *ehci)
                clock_frame = -1;
        }
        if (ehci->clock_frame != clock_frame) {
-               free_cached_lists(ehci);
+               free_cached_itd_list(ehci);
                ehci->clock_frame = clock_frame;
        }
        clock %= mod;
@@ -2420,7 +2384,7 @@ restart:
                        clock = now;
                        clock_frame = clock >> 3;
                        if (ehci->clock_frame != clock_frame) {
-                               free_cached_lists(ehci);
+                               free_cached_itd_list(ehci);
                                ehci->clock_frame = clock_frame;
                        }
                } else {
index 556c0b48f3abd73278206e5c1581eb3d32b73850..2d85e21ff282e9199be521fe630588cf8b3e01fb 100644 (file)
@@ -87,9 +87,8 @@ struct ehci_hcd {                     /* one per controller */
        int                     next_uframe;    /* scan periodic, start here */
        unsigned                periodic_sched; /* periodic activity count */
 
-       /* list of itds & sitds completed while clock_frame was still active */
+       /* list of itds completed while clock_frame was still active */
        struct list_head        cached_itd_list;
-       struct list_head        cached_sitd_list;
        unsigned                clock_frame;
 
        /* per root hub port */
@@ -196,7 +195,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
        clear_bit (action, &ehci->actions);
 }
 
-static void free_cached_lists(struct ehci_hcd *ehci);
+static void free_cached_itd_list(struct ehci_hcd *ehci);
 
 /*-------------------------------------------------------------------------*/
 
@@ -395,8 +394,9 @@ struct ehci_iso_sched {
  * acts like a qh would, if EHCI had them for ISO.
  */
 struct ehci_iso_stream {
-       /* first field matches ehci_hq, but is NULL */
-       struct ehci_qh_hw       *hw;
+       /* first two fields match QH, but info1 == 0 */
+       __hc32                  hw_next;
+       __hc32                  hw_info1;
 
        u32                     refcount;
        u8                      bEndpointAddress;
index 65cac8cc8921254718d835860a9ad2871d13b823..32bbce9718f0626b98cc37e09b14faa51a5cc16f 100644 (file)
@@ -697,7 +697,7 @@ static int ohci_hub_control (
        u16             wLength
 ) {
        struct ohci_hcd *ohci = hcd_to_ohci (hcd);
-       int             ports = ohci->num_ports;
+       int             ports = hcd_to_bus (hcd)->root_hub->maxchild;
        u32             temp;
        int             retval = 0;
 
index 1f1d4fa6a778dca7f892da60b16b15b8db1a6395..100bf3d8437c12e7e1d68e090759c79c355aa5cb 100644 (file)
@@ -327,7 +327,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
        }
        i2c_adap = i2c_get_adapter(2);
        memset(&i2c_info, 0, sizeof(struct i2c_board_info));
-       strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
+       strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE);
        isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
                                                   normal_i2c);
        i2c_put_adapter(i2c_adap);
@@ -411,7 +411,7 @@ out3:
 out2:
        clk_put(usb_clk);
 out1:
-       i2c_unregister_device(isp1301_i2c_client);
+       i2c_unregister_client(isp1301_i2c_client);
        isp1301_i2c_client = NULL;
 out_i2c_driver:
        i2c_del_driver(&isp1301_driver);
@@ -430,7 +430,7 @@ static int usb_hcd_pnx4008_remove(struct platform_device *pdev)
        pnx4008_unset_usb_bits();
        clk_disable(usb_clk);
        clk_put(usb_clk);
-       i2c_unregister_device(isp1301_i2c_client);
+       i2c_unregister_client(isp1301_i2c_client);
        isp1301_i2c_client = NULL;
        i2c_del_driver(&isp1301_driver);
 
index e3548ee6681cb8a2196724c50cbc99e5ab104f3f..9260c743baa6d66e8515f29621ec68c01ddb67ea 100644 (file)
@@ -418,7 +418,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
 
 /* this function must be called with interrupt disabled */
 static void free_usb_address(struct r8a66597 *r8a66597,
-                            struct r8a66597_device *dev, int reset)
+                            struct r8a66597_device *dev)
 {
        int port;
 
@@ -430,13 +430,7 @@ static void free_usb_address(struct r8a66597 *r8a66597,
        dev->state = USB_STATE_DEFAULT;
        r8a66597->address_map &= ~(1 << dev->address);
        dev->address = 0;
-       /*
-        * Only when resetting USB, it is necessary to erase drvdata. When
-        * a usb device with usb hub is disconnect, "dev->udev" is already
-        * freed on usb_desconnect(). So we cannot access the data.
-        */
-       if (reset)
-               dev_set_drvdata(&dev->udev->dev, NULL);
+       dev_set_drvdata(&dev->udev->dev, NULL);
        list_del(&dev->device_list);
        kfree(dev);
 
@@ -1073,7 +1067,7 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
        struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
 
        disable_r8a66597_pipe_all(r8a66597, dev);
-       free_usb_address(r8a66597, dev, 0);
+       free_usb_address(r8a66597, dev);
 
        start_root_hub_sampling(r8a66597, port, 0);
 }
@@ -2091,7 +2085,7 @@ static void update_usb_address_map(struct r8a66597 *r8a66597,
                                spin_lock_irqsave(&r8a66597->lock, flags);
                                dev = get_r8a66597_device(r8a66597, addr);
                                disable_r8a66597_pipe_all(r8a66597, dev);
-                               free_usb_address(r8a66597, dev, 0);
+                               free_usb_address(r8a66597, dev);
                                put_child_connect_map(r8a66597, addr);
                                spin_unlock_irqrestore(&r8a66597->lock, flags);
                        }
@@ -2234,7 +2228,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        rh->port |= (1 << USB_PORT_FEAT_RESET);
 
                        disable_r8a66597_pipe_all(r8a66597, dev);
-                       free_usb_address(r8a66597, dev, 1);
+                       free_usb_address(r8a66597, dev);
 
                        r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
                                      get_dvstctr_reg(port));
index 09197067fe6bc1032c4859fa557b5b15bb8be92c..99cd00fd3514c7f25abb0b42977e14c61edbb6ca 100644 (file)
@@ -735,7 +735,6 @@ static void uhci_stop(struct usb_hcd *hcd)
                uhci_hc_died(uhci);
        uhci_scan_schedule(uhci);
        spin_unlock_irq(&uhci->lock);
-       synchronize_irq(hcd->irq);
 
        del_timer_sync(&uhci->fsbr_timer);
        release_uhci(uhci);
index 78c4edac1db14cf324cd90a039467d22f90ae53f..ecc131c3fe337a68788054dfc2dc695503b30a77 100644 (file)
@@ -101,15 +101,12 @@ static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
 
        next = readl(base + ext_offset);
 
-       if (ext_offset == XHCI_HCC_PARAMS_OFFSET) {
+       if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
                /* Find the first extended capability */
                next = XHCI_HCC_EXT_CAPS(next);
-               ext_offset = 0;
-       } else {
+       else
                /* Find the next extended capability */
                next = XHCI_EXT_CAPS_NEXT(next);
-       }
-
        if (!next)
                return 0;
        /*
index a24a92f7dbc0de64dc3e6384e014c762e4c96ca8..932f9993848175e476244fd23fa90303cc6b7d61 100644 (file)
@@ -96,33 +96,6 @@ int xhci_halt(struct xhci_hcd *xhci)
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 }
 
-/*
- * Set the run bit and wait for the host to be running.
- */
-int xhci_start(struct xhci_hcd *xhci)
-{
-       u32 temp;
-       int ret;
-
-       temp = xhci_readl(xhci, &xhci->op_regs->command);
-       temp |= (CMD_RUN);
-       xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
-                       temp);
-       xhci_writel(xhci, temp, &xhci->op_regs->command);
-
-       /*
-        * Wait for the HCHalted Status bit to be 0 to indicate the host is
-        * running.
-        */
-       ret = handshake(xhci, &xhci->op_regs->status,
-                       STS_HALT, 0, XHCI_MAX_HALT_USEC);
-       if (ret == -ETIMEDOUT)
-               xhci_err(xhci, "Host took too long to start, "
-                               "waited %u microseconds.\n",
-                               XHCI_MAX_HALT_USEC);
-       return ret;
-}
-
 /*
  * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
  *
@@ -134,7 +107,6 @@ int xhci_reset(struct xhci_hcd *xhci)
 {
        u32 command;
        u32 state;
-       int ret;
 
        state = xhci_readl(xhci, &xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
@@ -149,17 +121,7 @@ int xhci_reset(struct xhci_hcd *xhci)
        /* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
        xhci_to_hcd(xhci)->state = HC_STATE_HALT;
 
-       ret = handshake(xhci, &xhci->op_regs->command,
-                       CMD_RESET, 0, 250 * 1000);
-       if (ret)
-               return ret;
-
-       xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
-       /*
-        * xHCI cannot write to any doorbells or operational registers other
-        * than status until the "Controller Not Ready" flag is cleared.
-        */
-       return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
+       return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
 }
 
 /*
@@ -498,11 +460,13 @@ int xhci_run(struct usb_hcd *hcd)
        if (NUM_TEST_NOOPS > 0)
                doorbell = xhci_setup_one_noop(xhci);
 
-       if (xhci_start(xhci)) {
-               xhci_halt(xhci);
-               return -ENODEV;
-       }
-
+       temp = xhci_readl(xhci, &xhci->op_regs->command);
+       temp |= (CMD_RUN);
+       xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+                       temp);
+       xhci_writel(xhci, temp, &xhci->op_regs->command);
+       /* Flush PCI posted writes */
+       temp = xhci_readl(xhci, &xhci->op_regs->command);
        xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
        if (doorbell)
                (*doorbell)(xhci);
@@ -1193,7 +1157,6 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                cmd_completion = &virt_dev->cmd_completion;
                cmd_status = &virt_dev->cmd_status;
        }
-       init_completion(cmd_completion);
 
        if (!ctx_change)
                ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
@@ -1450,8 +1413,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
                kfree(virt_ep->stopped_td);
                xhci_ring_cmd_db(xhci);
        }
-       virt_ep->stopped_td = NULL;
-       virt_ep->stopped_trb = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
 
        if (ret)
index dd71f02d5ff4e9b3868e9fb0decf7ed662bce169..b8fd270a8b0d5f2630ed87dbcf6d24f8a566c86e 100644 (file)
@@ -496,19 +496,6 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
        return EP_INTERVAL(interval);
 }
 
-/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
- * High speed endpoint descriptors can define "the number of additional
- * transaction opportunities per microframe", but that goes in the Max Burst
- * endpoint context field.
- */
-static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
-               struct usb_host_endpoint *ep)
-{
-       if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
-               return 0;
-       return ep->ss_ep_comp->desc.bmAttributes;
-}
-
 static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
@@ -539,36 +526,6 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
        return type;
 }
 
-/* Return the maximum endpoint service interval time (ESIT) payload.
- * Basically, this is the maxpacket size, multiplied by the burst size
- * and mult size.
- */
-static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
-               struct usb_device *udev,
-               struct usb_host_endpoint *ep)
-{
-       int max_burst;
-       int max_packet;
-
-       /* Only applies for interrupt or isochronous endpoints */
-       if (usb_endpoint_xfer_control(&ep->desc) ||
-                       usb_endpoint_xfer_bulk(&ep->desc))
-               return 0;
-
-       if (udev->speed == USB_SPEED_SUPER) {
-               if (ep->ss_ep_comp)
-                       return ep->ss_ep_comp->desc.wBytesPerInterval;
-               xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
-               /* Assume no bursts, no multiple opportunities to send. */
-               return ep->desc.wMaxPacketSize;
-       }
-
-       max_packet = ep->desc.wMaxPacketSize & 0x3ff;
-       max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
-       /* A 0 in max burst means 1 transfer per ESIT */
-       return max_packet * (max_burst + 1);
-}
-
 int xhci_endpoint_init(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                struct usb_device *udev,
@@ -580,7 +537,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        struct xhci_ring *ep_ring;
        unsigned int max_packet;
        unsigned int max_burst;
-       u32 max_esit_payload;
 
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
@@ -594,7 +550,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 
        ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
-       ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
 
        /* FIXME dig Mult and streams info out of ep companion desc */
 
@@ -640,26 +595,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        default:
                BUG();
        }
-       max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
-       ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
-
-       /*
-        * XXX no idea how to calculate the average TRB buffer length for bulk
-        * endpoints, as the driver gives us no clue how big each scatter gather
-        * list entry (or buffer) is going to be.
-        *
-        * For isochronous and interrupt endpoints, we set it to the max
-        * available, until we have new API in the USB core to allow drivers to
-        * declare how much bandwidth they actually need.
-        *
-        * Normally, it would be calculated by taking the total of the buffer
-        * lengths in the TD and then dividing by the number of TRBs in a TD,
-        * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
-        * use Event Data TRBs, and we don't chain in a link TRB on short
-        * transfers, we're basically dividing by 1.
-        */
-       ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
-
        /* FIXME Debug endpoint context */
        return 0;
 }
index 6416a0fca012b0ad80366559ce7a34028d4bdaf3..821b7b4709de6531b28afb78c428ad4f5379ff81 100644 (file)
@@ -124,7 +124,7 @@ static void next_trb(struct xhci_hcd *xhci,
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
-               (*trb)++;
+               *trb = (*trb)++;
        }
 }
 
@@ -241,27 +241,10 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
        int i;
        union xhci_trb *enq = ring->enqueue;
        struct xhci_segment *enq_seg = ring->enq_seg;
-       struct xhci_segment *cur_seg;
-       unsigned int left_on_ring;
 
        /* Check if ring is empty */
-       if (enq == ring->dequeue) {
-               /* Can't use link trbs */
-               left_on_ring = TRBS_PER_SEGMENT - 1;
-               for (cur_seg = enq_seg->next; cur_seg != enq_seg;
-                               cur_seg = cur_seg->next)
-                       left_on_ring += TRBS_PER_SEGMENT - 1;
-
-               /* Always need one TRB free in the ring. */
-               left_on_ring -= 1;
-               if (num_trbs > left_on_ring) {
-                       xhci_warn(xhci, "Not enough room on ring; "
-                                       "need %u TRBs, %u TRBs left\n",
-                                       num_trbs, left_on_ring);
-                       return 0;
-               }
+       if (enq == ring->dequeue)
                return 1;
-       }
        /* Make sure there's an extra empty TRB available */
        for (i = 0; i <= num_trbs; ++i) {
                if (enq == ring->dequeue)
@@ -350,8 +333,7 @@ static struct xhci_segment *find_trb_seg(
        while (cur_seg->trbs > trb ||
                        &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
                generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-               if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
-                               TRB_TYPE(TRB_LINK) &&
+               if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
                                (generic_trb->field[3] & LINK_TOGGLE))
                        *cycle_state = ~(*cycle_state) & 0x1;
                cur_seg = cur_seg->next;
@@ -407,7 +389,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                BUG();
 
        trb = &state->new_deq_ptr->generic;
-       if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
+       if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
                                (trb->field[3] & LINK_TOGGLE))
                state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
@@ -566,8 +548,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
                /* Otherwise just ring the doorbell to restart the ring */
                ring_ep_doorbell(xhci, slot_id, ep_index);
        }
-       ep->stopped_td = NULL;
-       ep->stopped_trb = NULL;
 
        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1085,13 +1065,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 
                        ep->stopped_td = td;
                        ep->stopped_trb = event_trb;
-
                        xhci_queue_reset_ep(xhci, slot_id, ep_index);
                        xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
-
-                       ep->stopped_td = NULL;
-                       ep->stopped_trb = NULL;
-
                        xhci_ring_cmd_db(xhci);
                        goto td_cleanup;
                default:
@@ -1211,10 +1186,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                        for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
                                        cur_trb != event_trb;
                                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-                               if ((cur_trb->generic.field[3] &
-                                TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-                                   (cur_trb->generic.field[3] &
-                                TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+                               if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
+                                               TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
                                        td->urb->actual_length +=
                                                TRB_LEN(cur_trb->generic.field[2]);
                        }
index db821e98210096b13f1728f8581ce414bba1c246..4b254b6fa2456468731925199a69de85afd84602 100644 (file)
@@ -609,10 +609,6 @@ struct xhci_ep_ctx {
 #define MAX_PACKET_MASK                (0xffff << 16)
 #define MAX_PACKET_DECODED(p)  (((p) >> 16) & 0xffff)
 
-/* tx_info bitmasks */
-#define AVG_TRB_LENGTH_FOR_EP(p)       ((p) & 0xffff)
-#define MAX_ESIT_PAYLOAD_FOR_EP(p)     (((p) & 0xffff) << 16)
-
 
 /**
  * struct xhci_input_control_context
index 49deeb6c3b528135ed8fc207a101e12e857f26e7..5720bfef6a389ada6709be2ffac3aca06a1d969a 100644 (file)
@@ -195,9 +195,11 @@ static ssize_t get_port1_handler(struct device *dev,
        return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1);
 }
 
-static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler);
+static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO,
+                  get_port0_handler, set_port0_handler);
 
-static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler);
+static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO,
+                  get_port1_handler, set_port1_handler);
 
 
 static int cypress_probe(struct usb_interface *interface,
index 02ff0405d746b87aa8e3423d173bf0de1112d3d2..e75bb87ee92b87f39d67b883bb22f4f77fce3a79 100644 (file)
@@ -552,7 +552,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
                        /* needed for power consumption */
                        struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
 
-                       memset(&info, 0, sizeof(info));
                        /* directly from the descriptor */
                        info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
                        info.product = dev->product_id;
index cafbd15142754977a377806864628650ca5421ac..0025847743f30fc12500045e7980bd5568e4e83c 100644 (file)
@@ -2435,8 +2435,7 @@ sisusb_open(struct inode *inode, struct file *file)
        }
 
        if (!sisusb->devinit) {
-               if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH ||
-                   sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
+               if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH) {
                        if (sisusb_init_gfxdevice(sisusb, 0)) {
                                mutex_unlock(&sisusb->lock);
                                dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
@@ -3008,7 +3007,6 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 #else
                        x.sisusb_conactive  = 0;
 #endif
-                       memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
 
                        if (copy_to_user((void __user *)arg, &x, sizeof(x)))
                                retval = -EFAULT;
@@ -3169,7 +3167,7 @@ static int sisusb_probe(struct usb_interface *intf,
 
        sisusb->present = 1;
 
-       if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
+       if (dev->speed == USB_SPEED_HIGH) {
                int initscreen = 1;
 #ifdef INCL_SISUSB_CON
                if (sisusb_first_vc > 0 &&
@@ -3247,7 +3245,6 @@ static struct usb_device_id sisusb_table [] = {
        { USB_DEVICE(0x0711, 0x0902) },
        { USB_DEVICE(0x0711, 0x0903) },
        { USB_DEVICE(0x0711, 0x0918) },
-       { USB_DEVICE(0x0711, 0x0920) },
        { USB_DEVICE(0x182d, 0x021c) },
        { USB_DEVICE(0x182d, 0x0269) },
        { }
index d509dcb29b38a5e7579856ba9057dd974c02d717..2e14102955c5936def7394c9faea1306696f18bb 100644 (file)
@@ -85,7 +85,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed);
+static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed);
 
 static int tv_probe(struct usb_interface *interface,
                    const struct usb_device_id *id)
index 9650de93a3d40c6e67dce43f845bb393fab819be..06cb71942dc7ee87e52c56c1be0410d64a707be4 100644 (file)
@@ -94,7 +94,7 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co
        change_color(led);                                              \
        return count;                                                   \
 }                                                                      \
-static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value);
+static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
 show_set(blue);
 show_set(red);
 show_set(green);
index cd8726c30444d7098230db70ef3cf0d9677c82a3..3db255537e7913d2de5d0b03781fc20768af2a91 100644 (file)
@@ -185,7 +185,7 @@ static ssize_t set_attr_##name(struct device *dev,          \
                                                                \
        return count;                                           \
 }                                                              \
-static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name);
+static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name);
 
 static ssize_t show_attr_text(struct device *dev,
        struct device_attribute *attr, char *buf)
@@ -216,7 +216,7 @@ static ssize_t set_attr_text(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text);
+static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text);
 
 static ssize_t show_attr_decimals(struct device *dev,
        struct device_attribute *attr, char *buf)
@@ -265,7 +265,8 @@ static ssize_t set_attr_decimals(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals);
+static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO,
+       show_attr_decimals, set_attr_decimals);
 
 static ssize_t show_attr_textmode(struct device *dev,
        struct device_attribute *attr, char *buf)
@@ -311,7 +312,8 @@ static ssize_t set_attr_textmode(struct device *dev,
        return -EINVAL;
 }
 
-static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode);
+static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO,
+       show_attr_textmode, set_attr_textmode);
 
 
 MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
index d6a2ef374d832447442db0207fd9a19967c94fd1..a9f06d76960ffa936858adaf321d5c53d39605dd 100644 (file)
@@ -1382,6 +1382,7 @@ static void iso_callback (struct urb *urb)
                        break;
                }
        }
+       simple_free_urb (urb);
 
        ctx->pending--;
        if (ctx->pending == 0) {
@@ -1498,7 +1499,6 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
                        }
 
                        simple_free_urb (urbs [i]);
-                       urbs[i] = NULL;
                        context.pending--;
                        context.submit_error = 1;
                        break;
@@ -1508,10 +1508,6 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
 
        wait_for_completion (&context.done);
 
-       for (i = 0; i < param->sglen; i++) {
-               if (urbs[i])
-                       simple_free_urb(urbs[i]);
-       }
        /*
         * Isochronous transfers are expected to fail sometimes.  As an
         * arbitrary limit, we will report an error if any submissions
index 9231b25d725a0b18e0208d55d914e31ac850304d..10f3205798e880ce4f1351735666bac903e34398 100644 (file)
@@ -971,7 +971,7 @@ static int mon_bin_ioctl(struct inode *inode, struct file *file,
 
                mutex_lock(&rp->fetch_lock);
                spin_lock_irqsave(&rp->b_lock, flags);
-               mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
+               mon_free_buff(rp->b_vec, size/CHUNK_SIZE);
                kfree(rp->b_vec);
                rp->b_vec  = vec;
                rp->b_size = size;
index 51e8f0f734a0028e0f82f78e6e29a3bde26c79d4..fcec87ea709e6d682f4c045d79c19c2c4a1fbcda 100644 (file)
@@ -248,10 +248,8 @@ int __init musb_platform_init(struct musb *musb)
 
        usb_nop_xceiv_register();
        musb->xceiv = otg_get_transceiver();
-       if (!musb->xceiv) {
-               gpio_free(musb->config->gpio_vrsel);
+       if (!musb->xceiv)
                return -ENODEV;
-       }
 
        if (ANOMALY_05000346) {
                bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
index c6f5ee4575cf26ce8ab8d5fcdcee8aa04648c905..74073f9a43f09cafc6afed82e5a9cadd17304d62 100644 (file)
@@ -577,19 +577,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 {
        const u8                epnum = req->epnum;
        struct usb_request      *request = &req->request;
-       struct musb_ep          *musb_ep;
+       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        unsigned                fifo_count = 0;
-       u16                     len;
+       u16                     len = musb_ep->packet_sz;
        u16                     csr = musb_readw(epio, MUSB_RXCSR);
-       struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
-
-       if (hw_ep->is_shared_fifo)
-               musb_ep = &hw_ep->ep_in;
-       else
-               musb_ep = &hw_ep->ep_out;
-
-       len = musb_ep->packet_sz;
 
        /* We shouldn't get here while DMA is active, but we do... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -757,15 +749,9 @@ void musb_g_rx(struct musb *musb, u8 epnum)
        u16                     csr;
        struct usb_request      *request;
        void __iomem            *mbase = musb->mregs;
-       struct musb_ep          *musb_ep;
+       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;
-       struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
-
-       if (hw_ep->is_shared_fifo)
-               musb_ep = &hw_ep->ep_in;
-       else
-               musb_ep = &hw_ep->ep_out;
 
        musb_ep_select(mbase, epnum);
 
@@ -1088,7 +1074,7 @@ struct free_record {
 /*
  * Context: controller locked, IRQs blocked.
  */
-void musb_ep_restart(struct musb *musb, struct musb_request *req)
+static void musb_ep_restart(struct musb *musb, struct musb_request *req)
 {
        DBG(3, "<== %s request %p len %u on hw_ep%d\n",
                req->tx ? "TX/IN" : "RX/OUT",
index 76711f2a451bce034013e1c4168031acef6004a9..59502da9f739ce179bca5faa549147d6c34ad078 100644 (file)
@@ -105,6 +105,4 @@ extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
 
 extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
 
-extern void musb_ep_restart(struct musb *, struct musb_request *);
-
 #endif         /* __MUSB_GADGET_H */
index 53c04448a8b89dac89e4f4764ee63837a03fac13..067e5a95b1491334db8fdbc76554f2f0aae634e3 100644 (file)
@@ -369,7 +369,6 @@ stall:
                                        ctrlrequest->wIndex & 0x0f;
                                struct musb_ep          *musb_ep;
                                struct musb_hw_ep       *ep;
-                               struct musb_request     *request;
                                void __iomem            *regs;
                                int                     is_in;
                                u16                     csr;
@@ -412,14 +411,6 @@ stall:
                                                        csr);
                                }
 
-                               /* Maybe start the first request in the queue */
-                               request = to_musb_request(
-                                               next_request(musb_ep));
-                               if (!musb_ep->busy && request) {
-                                       DBG(3, "restarting the request\n");
-                                       musb_ep_restart(musb, request);
-                               }
-
                                /* select ep0 again */
                                musb_ep_select(mbase, 0);
                                handled = 1;
index 9f8f0d0443c73001bf4397fcfb19046415e0c136..bd254ec97d14be6d9b9e4db7e64bfbbca1f7d14c 100644 (file)
@@ -56,14 +56,11 @@ static int cp210x_carrier_raised(struct usb_serial_port *p);
 static int debug;
 
 static struct usb_device_id id_table [] = {
-       { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
        { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
        { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
        { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
        { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
        { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
-       { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
-       { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
        { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
        { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
        { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
@@ -75,12 +72,9 @@ static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
        { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
        { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
-       { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */
-       { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
        { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
        { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
        { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
-       { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
        { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
        { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
        { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
@@ -88,35 +82,28 @@ static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
        { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
        { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
-       { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
        { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
        { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
-       { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
-       { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
        { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
-       { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
        { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
        { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
        { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
-       { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
        { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
        { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
        { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */
-       { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesys ETRX2USB */
+       { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
        { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
        { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
        { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
        { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
-       { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
-       { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
        { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
        { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
        { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
@@ -127,14 +114,7 @@ static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
        { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
        { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
-       { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
-       { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
-       { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
-       { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
-       { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
-       { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
-       { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
        { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
@@ -227,8 +207,8 @@ static struct usb_serial_driver cp210x_device = {
 #define BITS_STOP_2            0x0002
 
 /* CP210X_SET_BREAK */
-#define BREAK_ON               0x0001
-#define BREAK_OFF              0x0000
+#define BREAK_ON               0x0000
+#define BREAK_OFF              0x0001
 
 /* CP210X_(SET_MHS|GET_MDMSTS) */
 #define CONTROL_DTR            0x0001
index d5556349db3b46044cc53ef0810d3d78c6e7eeaa..13a1b39f1590fdc96a16be67ca8cd5299797c0da 100644 (file)
 #include <linux/serial.h>
 #include <linux/usb/serial.h>
 #include "ftdi_sio.h"
-#include "ftdi_sio_ids.h"
 
 /*
  * Version Information
  */
 #define DRIVER_VERSION "v1.5.0"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
 #define DRIVER_DESC "USB FTDI Serial Converters Driver"
 
 static int debug;
@@ -145,15 +144,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
 
 
 
-/*
- * Device ID not listed? Test via module params product/vendor or
- * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
- */
 static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -162,9 +156,6 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_USINT_CAT_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_USINT_WKEY_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_USINT_RS232_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) },
@@ -182,11 +173,9 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
-       { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -206,7 +195,6 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
-       { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
        { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
@@ -563,16 +551,9 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
        /*
-        * ELV devices:
+        * Due to many user requests for multiple ELV devices we enable
+        * them by default.
         */
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
@@ -589,17 +570,11 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
        { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
        { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
        { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -620,7 +595,6 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
        { USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
-       { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
@@ -664,7 +638,6 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
        { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) },
        { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) },
-       { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
@@ -682,6 +655,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
        { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
+       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
@@ -702,8 +676,6 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
                .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
        { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
-       { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
-       { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
        { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
@@ -723,37 +695,7 @@ static struct usb_device_id id_table_combined [] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
        { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
-
-       /* Papouch devices based on FTDI chip */
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) },
        { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) },
-       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) },
-
        { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
        { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
@@ -775,35 +717,6 @@ static struct usb_device_id id_table_combined [] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
        { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
-       { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
-       { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
-       { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
-       { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
-       { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_ST_PID),
-               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-       { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID),
-               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-       { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID),
-               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-       { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
-               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-       { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
-       { USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) },
-       { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
-               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
-       { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
-       { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
-               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
@@ -1458,7 +1371,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
        }
 
        /* set max packet size based on descriptor */
-       priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
+       priv->max_packet_size = ep_desc->wMaxPacketSize;
 
        dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
 }
index b0e0d64f822e27b9bcf0c076d34e67ddecdebea5..4586a24fafb020c5f128c6b385cc10d1611b887c 100644 (file)
@@ -1,10 +1,7 @@
 /*
- * Driver definitions for the FTDI USB Single Port Serial Converter -
+ * Definitions for the FTDI USB Single Port Serial Converter -
  * known as FTDI_SIO (Serial Input/Output application of the chipset)
  *
- * For USB vendor/product IDs (VID/PID), please see ftdi_sio_ids.h
- *
- *
  * The example I have is known as the USC-1000 which is available from
  * http://www.dse.co.nz - cat no XH4214 It looks similar to this:
  * http://www.dansdata.com/usbser.htm but I can't be sure There are other
  * Bill Ryder - bryder@sgi.com formerly of Silicon Graphics, Inc.- wrote the
  * FTDI_SIO implementation.
  *
+ * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
+ * from Rudolf Gugler
+ *
+ */
+
+#define FTDI_VID       0x0403  /* Vendor Id */
+#define FTDI_SIO_PID   0x8372  /* Product Id SIO application of 8U100AX  */
+#define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */
+#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
+#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
+#define FTDI_232RL_PID  0xFBFA  /* Product ID for FT232RL */
+#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
+#define FTDI_RELAIS_PID        0xFA10  /* Relais device from Rudolf Gugler */
+#define FTDI_NF_RIC_VID        0x0DCD  /* Vendor Id */
+#define FTDI_NF_RIC_PID        0x0001  /* Product Id */
+#define FTDI_USBX_707_PID 0xF857       /* ADSTech IR Blaster USBX-707 */
+
+/* Larsen and Brusgaard AltiTrack/USBtrack  */
+#define LARSENBRUSGAARD_VID            0x0FD8
+#define LB_ALTITRACK_PID               0x0001
+
+/* www.canusb.com Lawicel CANUSB device */
+#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
+
+/* AlphaMicro Components AMC-232USB01 device */
+#define FTDI_AMC232_PID 0xFF00 /* Product Id */
+
+/* www.candapter.com Ewert Energy Systems CANdapter device */
+#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+
+/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
+/* the VID is the standard ftdi vid (FTDI_VID) */
+#define FTDI_SCS_DEVICE_0_PID 0xD010    /* SCS PTC-IIusb */
+#define FTDI_SCS_DEVICE_1_PID 0xD011    /* SCS Tracker / DSP TNC */
+#define FTDI_SCS_DEVICE_2_PID 0xD012
+#define FTDI_SCS_DEVICE_3_PID 0xD013
+#define FTDI_SCS_DEVICE_4_PID 0xD014
+#define FTDI_SCS_DEVICE_5_PID 0xD015
+#define FTDI_SCS_DEVICE_6_PID 0xD016
+#define FTDI_SCS_DEVICE_7_PID 0xD017
+
+/* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */
+#define FTDI_ACTZWAVE_PID      0xF2D0
+
+
+/* www.starting-point-systems.com µChameleon device */
+#define FTDI_MICRO_CHAMELEON_PID       0xCAA0  /* Product Id */
+
+/* www.irtrans.de device */
+#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
+
+
+/* www.thoughttechnology.com/ TT-USB provide with procomp use ftdi_sio */
+#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
+
+/* iPlus device */
+#define FTDI_IPLUS_PID 0xD070 /* Product Id */
+#define FTDI_IPLUS2_PID 0xD071 /* Product Id */
+
+/* DMX4ALL DMX Interfaces */
+#define FTDI_DMX4ALL 0xC850
+
+/* OpenDCC (www.opendcc.de) product id */
+#define FTDI_OPENDCC_PID       0xBFD8
+#define FTDI_OPENDCC_SNIFFER_PID       0xBFD9
+#define FTDI_OPENDCC_THROTTLE_PID      0xBFDA
+#define FTDI_OPENDCC_GATEWAY_PID       0xBFDB
+
+/* Sprog II (Andrew Crosland's SprogII DCC interface) */
+#define FTDI_SPROG_II          0xF0C8
+
+/* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */
+/* they use the ftdi chipset for the USB interface and the vendor id is the same */
+#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
+#define FTDI_XF_634_PID 0xFC09 /* 634: 20x4 Character Display */
+#define FTDI_XF_547_PID 0xFC0A /* 547: Two line Display */
+#define FTDI_XF_633_PID 0xFC0B /* 633: 16x2 Character Display with Keys */
+#define FTDI_XF_631_PID 0xFC0C /* 631: 20x2 Character Display */
+#define FTDI_XF_635_PID 0xFC0D /* 635: 20x4 Character Display */
+#define FTDI_XF_640_PID 0xFC0E /* 640: Two line Display */
+#define FTDI_XF_642_PID 0xFC0F /* 642: Two line Display */
+
+/* Video Networks Limited / Homechoice in the UK use an ftdi-based device for their 1Mb */
+/* broadband internet service.  The following PID is exhibited by the usb device supplied */
+/* (the VID is the standard ftdi vid (FTDI_VID) */
+#define FTDI_VNHCPCUSB_D_PID 0xfe38 /* Product Id */
+
+/*
+ * PCDJ use ftdi based dj-controllers.  The following PID is for their DAC-2 device
+ * http://www.pcdjhardware.com/DAC2.asp (PID sent by Wouter Paesen)
+ * (the VID is the standard ftdi vid (FTDI_VID) */
+#define FTDI_PCDJ_DAC2_PID 0xFA88
+
+/*
+ * The following are the values for the Matrix Orbital LCD displays,
+ * which are the FT232BM ( similar to the 8U232AM )
+ */
+#define FTDI_MTXORB_0_PID      0xFA00  /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_1_PID      0xFA01  /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_2_PID      0xFA02  /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_3_PID      0xFA03  /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_4_PID      0xFA04  /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_5_PID      0xFA05  /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_6_PID      0xFA06  /* Matrix Orbital Product Id */
+
+/* OOCDlink by Joern Kaipf <joernk@web.de>
+ * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
+#define FTDI_OOCDLINK_PID      0xbaf8  /* Amontec JTAGkey */
+
+/*
+ * The following are the values for the Matrix Orbital FTDI Range
+ * Anything in this range will use an FT232RL.
+ */
+#define MTXORB_VID                     0x1B3D
+#define MTXORB_FTDI_RANGE_0100_PID     0x0100
+#define MTXORB_FTDI_RANGE_0101_PID     0x0101
+#define MTXORB_FTDI_RANGE_0102_PID     0x0102
+#define MTXORB_FTDI_RANGE_0103_PID     0x0103
+#define MTXORB_FTDI_RANGE_0104_PID     0x0104
+#define MTXORB_FTDI_RANGE_0105_PID     0x0105
+#define MTXORB_FTDI_RANGE_0106_PID     0x0106
+#define MTXORB_FTDI_RANGE_0107_PID     0x0107
+#define MTXORB_FTDI_RANGE_0108_PID     0x0108
+#define MTXORB_FTDI_RANGE_0109_PID     0x0109
+#define MTXORB_FTDI_RANGE_010A_PID     0x010A
+#define MTXORB_FTDI_RANGE_010B_PID     0x010B
+#define MTXORB_FTDI_RANGE_010C_PID     0x010C
+#define MTXORB_FTDI_RANGE_010D_PID     0x010D
+#define MTXORB_FTDI_RANGE_010E_PID     0x010E
+#define MTXORB_FTDI_RANGE_010F_PID     0x010F
+#define MTXORB_FTDI_RANGE_0110_PID     0x0110
+#define MTXORB_FTDI_RANGE_0111_PID     0x0111
+#define MTXORB_FTDI_RANGE_0112_PID     0x0112
+#define MTXORB_FTDI_RANGE_0113_PID     0x0113
+#define MTXORB_FTDI_RANGE_0114_PID     0x0114
+#define MTXORB_FTDI_RANGE_0115_PID     0x0115
+#define MTXORB_FTDI_RANGE_0116_PID     0x0116
+#define MTXORB_FTDI_RANGE_0117_PID     0x0117
+#define MTXORB_FTDI_RANGE_0118_PID     0x0118
+#define MTXORB_FTDI_RANGE_0119_PID     0x0119
+#define MTXORB_FTDI_RANGE_011A_PID     0x011A
+#define MTXORB_FTDI_RANGE_011B_PID     0x011B
+#define MTXORB_FTDI_RANGE_011C_PID     0x011C
+#define MTXORB_FTDI_RANGE_011D_PID     0x011D
+#define MTXORB_FTDI_RANGE_011E_PID     0x011E
+#define MTXORB_FTDI_RANGE_011F_PID     0x011F
+#define MTXORB_FTDI_RANGE_0120_PID     0x0120
+#define MTXORB_FTDI_RANGE_0121_PID     0x0121
+#define MTXORB_FTDI_RANGE_0122_PID     0x0122
+#define MTXORB_FTDI_RANGE_0123_PID     0x0123
+#define MTXORB_FTDI_RANGE_0124_PID     0x0124
+#define MTXORB_FTDI_RANGE_0125_PID     0x0125
+#define MTXORB_FTDI_RANGE_0126_PID     0x0126
+#define MTXORB_FTDI_RANGE_0127_PID     0x0127
+#define MTXORB_FTDI_RANGE_0128_PID     0x0128
+#define MTXORB_FTDI_RANGE_0129_PID     0x0129
+#define MTXORB_FTDI_RANGE_012A_PID     0x012A
+#define MTXORB_FTDI_RANGE_012B_PID     0x012B
+#define MTXORB_FTDI_RANGE_012C_PID     0x012C
+#define MTXORB_FTDI_RANGE_012D_PID     0x012D
+#define MTXORB_FTDI_RANGE_012E_PID     0x012E
+#define MTXORB_FTDI_RANGE_012F_PID     0x012F
+#define MTXORB_FTDI_RANGE_0130_PID     0x0130
+#define MTXORB_FTDI_RANGE_0131_PID     0x0131
+#define MTXORB_FTDI_RANGE_0132_PID     0x0132
+#define MTXORB_FTDI_RANGE_0133_PID     0x0133
+#define MTXORB_FTDI_RANGE_0134_PID     0x0134
+#define MTXORB_FTDI_RANGE_0135_PID     0x0135
+#define MTXORB_FTDI_RANGE_0136_PID     0x0136
+#define MTXORB_FTDI_RANGE_0137_PID     0x0137
+#define MTXORB_FTDI_RANGE_0138_PID     0x0138
+#define MTXORB_FTDI_RANGE_0139_PID     0x0139
+#define MTXORB_FTDI_RANGE_013A_PID     0x013A
+#define MTXORB_FTDI_RANGE_013B_PID     0x013B
+#define MTXORB_FTDI_RANGE_013C_PID     0x013C
+#define MTXORB_FTDI_RANGE_013D_PID     0x013D
+#define MTXORB_FTDI_RANGE_013E_PID     0x013E
+#define MTXORB_FTDI_RANGE_013F_PID     0x013F
+#define MTXORB_FTDI_RANGE_0140_PID     0x0140
+#define MTXORB_FTDI_RANGE_0141_PID     0x0141
+#define MTXORB_FTDI_RANGE_0142_PID     0x0142
+#define MTXORB_FTDI_RANGE_0143_PID     0x0143
+#define MTXORB_FTDI_RANGE_0144_PID     0x0144
+#define MTXORB_FTDI_RANGE_0145_PID     0x0145
+#define MTXORB_FTDI_RANGE_0146_PID     0x0146
+#define MTXORB_FTDI_RANGE_0147_PID     0x0147
+#define MTXORB_FTDI_RANGE_0148_PID     0x0148
+#define MTXORB_FTDI_RANGE_0149_PID     0x0149
+#define MTXORB_FTDI_RANGE_014A_PID     0x014A
+#define MTXORB_FTDI_RANGE_014B_PID     0x014B
+#define MTXORB_FTDI_RANGE_014C_PID     0x014C
+#define MTXORB_FTDI_RANGE_014D_PID     0x014D
+#define MTXORB_FTDI_RANGE_014E_PID     0x014E
+#define MTXORB_FTDI_RANGE_014F_PID     0x014F
+#define MTXORB_FTDI_RANGE_0150_PID     0x0150
+#define MTXORB_FTDI_RANGE_0151_PID     0x0151
+#define MTXORB_FTDI_RANGE_0152_PID     0x0152
+#define MTXORB_FTDI_RANGE_0153_PID     0x0153
+#define MTXORB_FTDI_RANGE_0154_PID     0x0154
+#define MTXORB_FTDI_RANGE_0155_PID     0x0155
+#define MTXORB_FTDI_RANGE_0156_PID     0x0156
+#define MTXORB_FTDI_RANGE_0157_PID     0x0157
+#define MTXORB_FTDI_RANGE_0158_PID     0x0158
+#define MTXORB_FTDI_RANGE_0159_PID     0x0159
+#define MTXORB_FTDI_RANGE_015A_PID     0x015A
+#define MTXORB_FTDI_RANGE_015B_PID     0x015B
+#define MTXORB_FTDI_RANGE_015C_PID     0x015C
+#define MTXORB_FTDI_RANGE_015D_PID     0x015D
+#define MTXORB_FTDI_RANGE_015E_PID     0x015E
+#define MTXORB_FTDI_RANGE_015F_PID     0x015F
+#define MTXORB_FTDI_RANGE_0160_PID     0x0160
+#define MTXORB_FTDI_RANGE_0161_PID     0x0161
+#define MTXORB_FTDI_RANGE_0162_PID     0x0162
+#define MTXORB_FTDI_RANGE_0163_PID     0x0163
+#define MTXORB_FTDI_RANGE_0164_PID     0x0164
+#define MTXORB_FTDI_RANGE_0165_PID     0x0165
+#define MTXORB_FTDI_RANGE_0166_PID     0x0166
+#define MTXORB_FTDI_RANGE_0167_PID     0x0167
+#define MTXORB_FTDI_RANGE_0168_PID     0x0168
+#define MTXORB_FTDI_RANGE_0169_PID     0x0169
+#define MTXORB_FTDI_RANGE_016A_PID     0x016A
+#define MTXORB_FTDI_RANGE_016B_PID     0x016B
+#define MTXORB_FTDI_RANGE_016C_PID     0x016C
+#define MTXORB_FTDI_RANGE_016D_PID     0x016D
+#define MTXORB_FTDI_RANGE_016E_PID     0x016E
+#define MTXORB_FTDI_RANGE_016F_PID     0x016F
+#define MTXORB_FTDI_RANGE_0170_PID     0x0170
+#define MTXORB_FTDI_RANGE_0171_PID     0x0171
+#define MTXORB_FTDI_RANGE_0172_PID     0x0172
+#define MTXORB_FTDI_RANGE_0173_PID     0x0173
+#define MTXORB_FTDI_RANGE_0174_PID     0x0174
+#define MTXORB_FTDI_RANGE_0175_PID     0x0175
+#define MTXORB_FTDI_RANGE_0176_PID     0x0176
+#define MTXORB_FTDI_RANGE_0177_PID     0x0177
+#define MTXORB_FTDI_RANGE_0178_PID     0x0178
+#define MTXORB_FTDI_RANGE_0179_PID     0x0179
+#define MTXORB_FTDI_RANGE_017A_PID     0x017A
+#define MTXORB_FTDI_RANGE_017B_PID     0x017B
+#define MTXORB_FTDI_RANGE_017C_PID     0x017C
+#define MTXORB_FTDI_RANGE_017D_PID     0x017D
+#define MTXORB_FTDI_RANGE_017E_PID     0x017E
+#define MTXORB_FTDI_RANGE_017F_PID     0x017F
+#define MTXORB_FTDI_RANGE_0180_PID     0x0180
+#define MTXORB_FTDI_RANGE_0181_PID     0x0181
+#define MTXORB_FTDI_RANGE_0182_PID     0x0182
+#define MTXORB_FTDI_RANGE_0183_PID     0x0183
+#define MTXORB_FTDI_RANGE_0184_PID     0x0184
+#define MTXORB_FTDI_RANGE_0185_PID     0x0185
+#define MTXORB_FTDI_RANGE_0186_PID     0x0186
+#define MTXORB_FTDI_RANGE_0187_PID     0x0187
+#define MTXORB_FTDI_RANGE_0188_PID     0x0188
+#define MTXORB_FTDI_RANGE_0189_PID     0x0189
+#define MTXORB_FTDI_RANGE_018A_PID     0x018A
+#define MTXORB_FTDI_RANGE_018B_PID     0x018B
+#define MTXORB_FTDI_RANGE_018C_PID     0x018C
+#define MTXORB_FTDI_RANGE_018D_PID     0x018D
+#define MTXORB_FTDI_RANGE_018E_PID     0x018E
+#define MTXORB_FTDI_RANGE_018F_PID     0x018F
+#define MTXORB_FTDI_RANGE_0190_PID     0x0190
+#define MTXORB_FTDI_RANGE_0191_PID     0x0191
+#define MTXORB_FTDI_RANGE_0192_PID     0x0192
+#define MTXORB_FTDI_RANGE_0193_PID     0x0193
+#define MTXORB_FTDI_RANGE_0194_PID     0x0194
+#define MTXORB_FTDI_RANGE_0195_PID     0x0195
+#define MTXORB_FTDI_RANGE_0196_PID     0x0196
+#define MTXORB_FTDI_RANGE_0197_PID     0x0197
+#define MTXORB_FTDI_RANGE_0198_PID     0x0198
+#define MTXORB_FTDI_RANGE_0199_PID     0x0199
+#define MTXORB_FTDI_RANGE_019A_PID     0x019A
+#define MTXORB_FTDI_RANGE_019B_PID     0x019B
+#define MTXORB_FTDI_RANGE_019C_PID     0x019C
+#define MTXORB_FTDI_RANGE_019D_PID     0x019D
+#define MTXORB_FTDI_RANGE_019E_PID     0x019E
+#define MTXORB_FTDI_RANGE_019F_PID     0x019F
+#define MTXORB_FTDI_RANGE_01A0_PID     0x01A0
+#define MTXORB_FTDI_RANGE_01A1_PID     0x01A1
+#define MTXORB_FTDI_RANGE_01A2_PID     0x01A2
+#define MTXORB_FTDI_RANGE_01A3_PID     0x01A3
+#define MTXORB_FTDI_RANGE_01A4_PID     0x01A4
+#define MTXORB_FTDI_RANGE_01A5_PID     0x01A5
+#define MTXORB_FTDI_RANGE_01A6_PID     0x01A6
+#define MTXORB_FTDI_RANGE_01A7_PID     0x01A7
+#define MTXORB_FTDI_RANGE_01A8_PID     0x01A8
+#define MTXORB_FTDI_RANGE_01A9_PID     0x01A9
+#define MTXORB_FTDI_RANGE_01AA_PID     0x01AA
+#define MTXORB_FTDI_RANGE_01AB_PID     0x01AB
+#define MTXORB_FTDI_RANGE_01AC_PID     0x01AC
+#define MTXORB_FTDI_RANGE_01AD_PID     0x01AD
+#define MTXORB_FTDI_RANGE_01AE_PID     0x01AE
+#define MTXORB_FTDI_RANGE_01AF_PID     0x01AF
+#define MTXORB_FTDI_RANGE_01B0_PID     0x01B0
+#define MTXORB_FTDI_RANGE_01B1_PID     0x01B1
+#define MTXORB_FTDI_RANGE_01B2_PID     0x01B2
+#define MTXORB_FTDI_RANGE_01B3_PID     0x01B3
+#define MTXORB_FTDI_RANGE_01B4_PID     0x01B4
+#define MTXORB_FTDI_RANGE_01B5_PID     0x01B5
+#define MTXORB_FTDI_RANGE_01B6_PID     0x01B6
+#define MTXORB_FTDI_RANGE_01B7_PID     0x01B7
+#define MTXORB_FTDI_RANGE_01B8_PID     0x01B8
+#define MTXORB_FTDI_RANGE_01B9_PID     0x01B9
+#define MTXORB_FTDI_RANGE_01BA_PID     0x01BA
+#define MTXORB_FTDI_RANGE_01BB_PID     0x01BB
+#define MTXORB_FTDI_RANGE_01BC_PID     0x01BC
+#define MTXORB_FTDI_RANGE_01BD_PID     0x01BD
+#define MTXORB_FTDI_RANGE_01BE_PID     0x01BE
+#define MTXORB_FTDI_RANGE_01BF_PID     0x01BF
+#define MTXORB_FTDI_RANGE_01C0_PID     0x01C0
+#define MTXORB_FTDI_RANGE_01C1_PID     0x01C1
+#define MTXORB_FTDI_RANGE_01C2_PID     0x01C2
+#define MTXORB_FTDI_RANGE_01C3_PID     0x01C3
+#define MTXORB_FTDI_RANGE_01C4_PID     0x01C4
+#define MTXORB_FTDI_RANGE_01C5_PID     0x01C5
+#define MTXORB_FTDI_RANGE_01C6_PID     0x01C6
+#define MTXORB_FTDI_RANGE_01C7_PID     0x01C7
+#define MTXORB_FTDI_RANGE_01C8_PID     0x01C8
+#define MTXORB_FTDI_RANGE_01C9_PID     0x01C9
+#define MTXORB_FTDI_RANGE_01CA_PID     0x01CA
+#define MTXORB_FTDI_RANGE_01CB_PID     0x01CB
+#define MTXORB_FTDI_RANGE_01CC_PID     0x01CC
+#define MTXORB_FTDI_RANGE_01CD_PID     0x01CD
+#define MTXORB_FTDI_RANGE_01CE_PID     0x01CE
+#define MTXORB_FTDI_RANGE_01CF_PID     0x01CF
+#define MTXORB_FTDI_RANGE_01D0_PID     0x01D0
+#define MTXORB_FTDI_RANGE_01D1_PID     0x01D1
+#define MTXORB_FTDI_RANGE_01D2_PID     0x01D2
+#define MTXORB_FTDI_RANGE_01D3_PID     0x01D3
+#define MTXORB_FTDI_RANGE_01D4_PID     0x01D4
+#define MTXORB_FTDI_RANGE_01D5_PID     0x01D5
+#define MTXORB_FTDI_RANGE_01D6_PID     0x01D6
+#define MTXORB_FTDI_RANGE_01D7_PID     0x01D7
+#define MTXORB_FTDI_RANGE_01D8_PID     0x01D8
+#define MTXORB_FTDI_RANGE_01D9_PID     0x01D9
+#define MTXORB_FTDI_RANGE_01DA_PID     0x01DA
+#define MTXORB_FTDI_RANGE_01DB_PID     0x01DB
+#define MTXORB_FTDI_RANGE_01DC_PID     0x01DC
+#define MTXORB_FTDI_RANGE_01DD_PID     0x01DD
+#define MTXORB_FTDI_RANGE_01DE_PID     0x01DE
+#define MTXORB_FTDI_RANGE_01DF_PID     0x01DF
+#define MTXORB_FTDI_RANGE_01E0_PID     0x01E0
+#define MTXORB_FTDI_RANGE_01E1_PID     0x01E1
+#define MTXORB_FTDI_RANGE_01E2_PID     0x01E2
+#define MTXORB_FTDI_RANGE_01E3_PID     0x01E3
+#define MTXORB_FTDI_RANGE_01E4_PID     0x01E4
+#define MTXORB_FTDI_RANGE_01E5_PID     0x01E5
+#define MTXORB_FTDI_RANGE_01E6_PID     0x01E6
+#define MTXORB_FTDI_RANGE_01E7_PID     0x01E7
+#define MTXORB_FTDI_RANGE_01E8_PID     0x01E8
+#define MTXORB_FTDI_RANGE_01E9_PID     0x01E9
+#define MTXORB_FTDI_RANGE_01EA_PID     0x01EA
+#define MTXORB_FTDI_RANGE_01EB_PID     0x01EB
+#define MTXORB_FTDI_RANGE_01EC_PID     0x01EC
+#define MTXORB_FTDI_RANGE_01ED_PID     0x01ED
+#define MTXORB_FTDI_RANGE_01EE_PID     0x01EE
+#define MTXORB_FTDI_RANGE_01EF_PID     0x01EF
+#define MTXORB_FTDI_RANGE_01F0_PID     0x01F0
+#define MTXORB_FTDI_RANGE_01F1_PID     0x01F1
+#define MTXORB_FTDI_RANGE_01F2_PID     0x01F2
+#define MTXORB_FTDI_RANGE_01F3_PID     0x01F3
+#define MTXORB_FTDI_RANGE_01F4_PID     0x01F4
+#define MTXORB_FTDI_RANGE_01F5_PID     0x01F5
+#define MTXORB_FTDI_RANGE_01F6_PID     0x01F6
+#define MTXORB_FTDI_RANGE_01F7_PID     0x01F7
+#define MTXORB_FTDI_RANGE_01F8_PID     0x01F8
+#define MTXORB_FTDI_RANGE_01F9_PID     0x01F9
+#define MTXORB_FTDI_RANGE_01FA_PID     0x01FA
+#define MTXORB_FTDI_RANGE_01FB_PID     0x01FB
+#define MTXORB_FTDI_RANGE_01FC_PID     0x01FC
+#define MTXORB_FTDI_RANGE_01FD_PID     0x01FD
+#define MTXORB_FTDI_RANGE_01FE_PID     0x01FE
+#define MTXORB_FTDI_RANGE_01FF_PID     0x01FF
+
+
+
+/* Interbiometrics USB I/O Board */
+/* Developed for Interbiometrics by Rudolf Gugler */
+#define INTERBIOMETRICS_VID              0x1209
+#define INTERBIOMETRICS_IOBOARD_PID      0x1002
+#define INTERBIOMETRICS_MINI_IOBOARD_PID 0x1006
+
+/*
+ * The following are the values for the Perle Systems
+ * UltraPort USB serial converters
+ */
+#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0        /* Perle UltraPort Product Id */
+
+/*
+ * The following are the values for the Sealevel SeaLINK+ adapters.
+ * (Original list sent by Tuan Hoang.  Ian Abbott renamed the macros and
+ * removed some PIDs that don't seem to match any existing products.)
+ */
+#define SEALEVEL_VID           0x0c52  /* Sealevel Vendor ID */
+#define SEALEVEL_2101_PID      0x2101  /* SeaLINK+232 (2101/2105) */
+#define SEALEVEL_2102_PID      0x2102  /* SeaLINK+485 (2102) */
+#define SEALEVEL_2103_PID      0x2103  /* SeaLINK+232I (2103) */
+#define SEALEVEL_2104_PID      0x2104  /* SeaLINK+485I (2104) */
+#define SEALEVEL_2106_PID      0x9020  /* SeaLINK+422 (2106) */
+#define SEALEVEL_2201_1_PID    0x2211  /* SeaPORT+2/232 (2201) Port 1 */
+#define SEALEVEL_2201_2_PID    0x2221  /* SeaPORT+2/232 (2201) Port 2 */
+#define SEALEVEL_2202_1_PID    0x2212  /* SeaPORT+2/485 (2202) Port 1 */
+#define SEALEVEL_2202_2_PID    0x2222  /* SeaPORT+2/485 (2202) Port 2 */
+#define SEALEVEL_2203_1_PID    0x2213  /* SeaPORT+2 (2203) Port 1 */
+#define SEALEVEL_2203_2_PID    0x2223  /* SeaPORT+2 (2203) Port 2 */
+#define SEALEVEL_2401_1_PID    0x2411  /* SeaPORT+4/232 (2401) Port 1 */
+#define SEALEVEL_2401_2_PID    0x2421  /* SeaPORT+4/232 (2401) Port 2 */
+#define SEALEVEL_2401_3_PID    0x2431  /* SeaPORT+4/232 (2401) Port 3 */
+#define SEALEVEL_2401_4_PID    0x2441  /* SeaPORT+4/232 (2401) Port 4 */
+#define SEALEVEL_2402_1_PID    0x2412  /* SeaPORT+4/485 (2402) Port 1 */
+#define SEALEVEL_2402_2_PID    0x2422  /* SeaPORT+4/485 (2402) Port 2 */
+#define SEALEVEL_2402_3_PID    0x2432  /* SeaPORT+4/485 (2402) Port 3 */
+#define SEALEVEL_2402_4_PID    0x2442  /* SeaPORT+4/485 (2402) Port 4 */
+#define SEALEVEL_2403_1_PID    0x2413  /* SeaPORT+4 (2403) Port 1 */
+#define SEALEVEL_2403_2_PID    0x2423  /* SeaPORT+4 (2403) Port 2 */
+#define SEALEVEL_2403_3_PID    0x2433  /* SeaPORT+4 (2403) Port 3 */
+#define SEALEVEL_2403_4_PID    0x2443  /* SeaPORT+4 (2403) Port 4 */
+#define SEALEVEL_2801_1_PID    0X2811  /* SeaLINK+8/232 (2801) Port 1 */
+#define SEALEVEL_2801_2_PID    0X2821  /* SeaLINK+8/232 (2801) Port 2 */
+#define SEALEVEL_2801_3_PID    0X2831  /* SeaLINK+8/232 (2801) Port 3 */
+#define SEALEVEL_2801_4_PID    0X2841  /* SeaLINK+8/232 (2801) Port 4 */
+#define SEALEVEL_2801_5_PID    0X2851  /* SeaLINK+8/232 (2801) Port 5 */
+#define SEALEVEL_2801_6_PID    0X2861  /* SeaLINK+8/232 (2801) Port 6 */
+#define SEALEVEL_2801_7_PID    0X2871  /* SeaLINK+8/232 (2801) Port 7 */
+#define SEALEVEL_2801_8_PID    0X2881  /* SeaLINK+8/232 (2801) Port 8 */
+#define SEALEVEL_2802_1_PID    0X2812  /* SeaLINK+8/485 (2802) Port 1 */
+#define SEALEVEL_2802_2_PID    0X2822  /* SeaLINK+8/485 (2802) Port 2 */
+#define SEALEVEL_2802_3_PID    0X2832  /* SeaLINK+8/485 (2802) Port 3 */
+#define SEALEVEL_2802_4_PID    0X2842  /* SeaLINK+8/485 (2802) Port 4 */
+#define SEALEVEL_2802_5_PID    0X2852  /* SeaLINK+8/485 (2802) Port 5 */
+#define SEALEVEL_2802_6_PID    0X2862  /* SeaLINK+8/485 (2802) Port 6 */
+#define SEALEVEL_2802_7_PID    0X2872  /* SeaLINK+8/485 (2802) Port 7 */
+#define SEALEVEL_2802_8_PID    0X2882  /* SeaLINK+8/485 (2802) Port 8 */
+#define SEALEVEL_2803_1_PID    0X2813  /* SeaLINK+8 (2803) Port 1 */
+#define SEALEVEL_2803_2_PID    0X2823  /* SeaLINK+8 (2803) Port 2 */
+#define SEALEVEL_2803_3_PID    0X2833  /* SeaLINK+8 (2803) Port 3 */
+#define SEALEVEL_2803_4_PID    0X2843  /* SeaLINK+8 (2803) Port 4 */
+#define SEALEVEL_2803_5_PID    0X2853  /* SeaLINK+8 (2803) Port 5 */
+#define SEALEVEL_2803_6_PID    0X2863  /* SeaLINK+8 (2803) Port 6 */
+#define SEALEVEL_2803_7_PID    0X2873  /* SeaLINK+8 (2803) Port 7 */
+#define SEALEVEL_2803_8_PID    0X2883  /* SeaLINK+8 (2803) Port 8 */
+
+/*
+ * The following are the values for two KOBIL chipcard terminals.
+ */
+#define KOBIL_VID              0x0d46  /* KOBIL Vendor ID */
+#define KOBIL_CONV_B1_PID      0x2020  /* KOBIL Konverter for B1 */
+#define KOBIL_CONV_KAAN_PID    0x2021  /* KOBIL_Konverter for KAAN */
+
+/*
+ * Icom ID-1 digital transceiver
+ */
+
+#define ICOM_ID1_VID            0x0C26
+#define ICOM_ID1_PID            0x0004
+
+/*
+ * ASK.fr devices
+ */
+#define FTDI_ASK_RDR400_PID    0xC991  /* ASK RDR 400 series card reader */
+
+/*
+ * FTDI USB UART chips used in construction projects from the
+ * Elektor Electronics magazine (http://elektor-electronics.co.uk)
+ */
+#define ELEKTOR_VID            0x0C7D
+#define ELEKTOR_FT323R_PID     0x0005  /* RFID-Reader, issue 09-2006 */
+
+/*
+ * DSS-20 Sync Station for Sony Ericsson P800
+ */
+#define FTDI_DSS20_PID          0xFC82
+
+/*
+ * Home Electronics (www.home-electro.com) USB gadgets
+ */
+#define FTDI_HE_TIRA1_PID      0xFA78  /* Tira-1 IR transceiver */
+
+/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */
+/* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */
+#define FTDI_USB_UIRT_PID      0xF850  /* Product Id */
+
+/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */
+
+#define FTDI_TNC_X_PID         0xEBE0
+
+/*
+ * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
+ * All of these devices use FTDI's vendor ID (0x0403).
+ *
+ * The previously included PID for the UO 100 module was incorrect.
+ * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
+ *
+ * Armin Laeuger originally sent the PID for the UM 100 module.
+ */
+#define FTDI_R2000KU_TRUE_RNG  0xFB80  /* R2000KU TRUE RNG */
+#define FTDI_ELV_UR100_PID     0xFB58  /* USB-RS232-Umsetzer (UR 100) */
+#define FTDI_ELV_UM100_PID     0xFB5A  /* USB-Modul UM 100 */
+#define FTDI_ELV_UO100_PID     0xFB5B  /* USB-Modul UO 100 */
+#define FTDI_ELV_ALC8500_PID   0xF06E  /* ALC 8500 Expert */
+/* Additional ELV PIDs that default to using the FTDI D2XX drivers on
+ * MS Windows, rather than the FTDI Virtual Com Port drivers.
+ * Maybe these will be easier to use with the libftdi/libusb user-space
+ * drivers, or possibly the Comedi drivers in some cases. */
+#define FTDI_ELV_CLI7000_PID   0xFB59  /* Computer-Light-Interface (CLI 7000) */
+#define FTDI_ELV_PPS7330_PID   0xFB5C  /* Processor-Power-Supply (PPS 7330) */
+#define FTDI_ELV_TFM100_PID    0xFB5D  /* Temperartur-Feuchte Messgeraet (TFM 100) */
+#define FTDI_ELV_UDF77_PID     0xFB5E  /* USB DCF Funkurh (UDF 77) */
+#define FTDI_ELV_UIO88_PID     0xFB5F  /* USB-I/O Interface (UIO 88) */
+#define FTDI_ELV_UAD8_PID      0xF068  /* USB-AD-Wandler (UAD 8) */
+#define FTDI_ELV_UDA7_PID      0xF069  /* USB-DA-Wandler (UDA 7) */
+#define FTDI_ELV_USI2_PID      0xF06A  /* USB-Schrittmotoren-Interface (USI 2) */
+#define FTDI_ELV_T1100_PID     0xF06B  /* Thermometer (T 1100) */
+#define FTDI_ELV_PCD200_PID    0xF06C  /* PC-Datenlogger (PCD 200) */
+#define FTDI_ELV_ULA200_PID    0xF06D  /* USB-LCD-Ansteuerung (ULA 200) */
+#define FTDI_ELV_FHZ1000PC_PID 0xF06F  /* FHZ 1000 PC */
+#define FTDI_ELV_CSI8_PID      0xE0F0  /* Computer-Schalt-Interface (CSI 8) */
+#define FTDI_ELV_EM1000DL_PID  0xE0F1  /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
+#define FTDI_ELV_PCK100_PID    0xE0F2  /* PC-Kabeltester (PCK 100) */
+#define FTDI_ELV_RFP500_PID    0xE0F3  /* HF-Leistungsmesser (RFP 500) */
+#define FTDI_ELV_FS20SIG_PID   0xE0F4  /* Signalgeber (FS 20 SIG) */
+#define FTDI_ELV_WS300PC_PID   0xE0F6  /* PC-Wetterstation (WS 300 PC) */
+#define FTDI_ELV_FHZ1300PC_PID 0xE0E8  /* FHZ 1300 PC */
+#define FTDI_ELV_WS500_PID     0xE0E9  /* PC-Wetterstation (WS 500) */
+#define FTDI_ELV_HS485_PID     0xE0EA  /* USB to RS-485 adapter */
+#define FTDI_ELV_EM1010PC_PID  0xE0EF  /* Engery monitor EM 1010 PC */
+#define FTDI_PHI_FISCO_PID      0xE40B  /* PHI Fisco USB to Serial cable */
+
+/*
+ * Definitions for ID TECH (www.idt-net.com) devices
+ */
+#define IDTECH_VID             0x0ACD  /* ID TECH Vendor ID */
+#define IDTECH_IDT1221U_PID    0x0300  /* IDT1221U USB to RS-232 adapter */
+
+/*
+ * Definitions for Omnidirectional Control Technology, Inc. devices
+ */
+#define OCT_VID                        0x0B39  /* OCT vendor ID */
+/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
+/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
+/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
+#define OCT_US101_PID          0x0421  /* OCT US101 USB to RS-232 */
+
+/* an infrared receiver for user access control with IR tags */
+#define FTDI_PIEGROUP_PID      0xF208  /* Product Id */
+
+/*
+ * Definitions for Artemis astronomical USB based cameras
+ * Check it at http://www.artemisccd.co.uk/
+ */
+#define FTDI_ARTEMIS_PID       0xDF28  /* All Artemis Cameras */
+
+/*
+ * Definitions for ATIK Instruments astronomical USB based cameras
+ * Check it at http://www.atik-instruments.com/
+ */
+#define FTDI_ATIK_ATK16_PID    0xDF30  /* ATIK ATK-16 Grayscale Camera */
+#define FTDI_ATIK_ATK16C_PID   0xDF32  /* ATIK ATK-16C Colour Camera */
+#define FTDI_ATIK_ATK16HR_PID  0xDF31  /* ATIK ATK-16HR Grayscale Camera */
+#define FTDI_ATIK_ATK16HRC_PID 0xDF33  /* ATIK ATK-16HRC Colour Camera */
+#define FTDI_ATIK_ATK16IC_PID   0xDF35  /* ATIK ATK-16IC Grayscale Camera */
+
+/*
+ * Protego product ids
+ */
+#define PROTEGO_SPECIAL_1      0xFC70  /* special/unknown device */
+#define PROTEGO_R2X0           0xFC71  /* R200-USB TRNG unit (R210, R220, and R230) */
+#define PROTEGO_SPECIAL_3      0xFC72  /* special/unknown device */
+#define PROTEGO_SPECIAL_4      0xFC73  /* special/unknown device */
+
+/*
+ * Gude Analog- und Digitalsysteme GmbH
+ */
+#define FTDI_GUDEADS_E808_PID    0xE808
+#define FTDI_GUDEADS_E809_PID    0xE809
+#define FTDI_GUDEADS_E80A_PID    0xE80A
+#define FTDI_GUDEADS_E80B_PID    0xE80B
+#define FTDI_GUDEADS_E80C_PID    0xE80C
+#define FTDI_GUDEADS_E80D_PID    0xE80D
+#define FTDI_GUDEADS_E80E_PID    0xE80E
+#define FTDI_GUDEADS_E80F_PID    0xE80F
+#define FTDI_GUDEADS_E888_PID    0xE888  /* Expert ISDN Control USB */
+#define FTDI_GUDEADS_E889_PID    0xE889  /* USB RS-232 OptoBridge */
+#define FTDI_GUDEADS_E88A_PID    0xE88A
+#define FTDI_GUDEADS_E88B_PID    0xE88B
+#define FTDI_GUDEADS_E88C_PID    0xE88C
+#define FTDI_GUDEADS_E88D_PID    0xE88D
+#define FTDI_GUDEADS_E88E_PID    0xE88E
+#define FTDI_GUDEADS_E88F_PID    0xE88F
+
+/*
+ * Linx Technologies product ids
+ */
+#define LINX_SDMUSBQSS_PID     0xF448  /* Linx SDM-USB-QS-S */
+#define LINX_MASTERDEVEL2_PID   0xF449   /* Linx Master Development 2.0 */
+#define LINX_FUTURE_0_PID   0xF44A   /* Linx future device */
+#define LINX_FUTURE_1_PID   0xF44B   /* Linx future device */
+#define LINX_FUTURE_2_PID   0xF44C   /* Linx future device */
+
+/* CCS Inc. ICDU/ICDU40 product ID - the FT232BM is used in an in-circuit-debugger */
+/* unit for PIC16's/PIC18's */
+#define FTDI_CCSICDU20_0_PID    0xF9D0
+#define FTDI_CCSICDU40_1_PID    0xF9D1
+#define FTDI_CCSMACHX_2_PID     0xF9D2
+#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
+#define FTDI_CCSICDU64_4_PID    0xF9D4
+#define FTDI_CCSPRIME8_5_PID    0xF9D5
+
+/* Inside Accesso contactless reader (http://www.insidefr.com) */
+#define INSIDE_ACCESSO         0xFAD0
+
+/*
+ * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
+ */
+#define INTREPID_VID           0x093C
+#define INTREPID_VALUECAN_PID  0x0601
+#define INTREPID_NEOVI_PID     0x0701
+
+/*
+ * Falcom Wireless Communications GmbH
+ */
+#define FALCOM_VID             0x0F94  /* Vendor Id */
+#define FALCOM_TWIST_PID       0x0001  /* Falcom Twist USB GPRS modem */
+#define FALCOM_SAMBA_PID       0x0005  /* Falcom Samba USB GPRS modem */
+
+/*
+ * SUUNTO product ids
+ */
+#define FTDI_SUUNTO_SPORTS_PID 0xF680  /* Suunto Sports instrument */
+
+/*
+ * Oceanic product ids
+ */
+#define FTDI_OCEANIC_PID       0xF460  /* Oceanic dive instrument */
+
+/*
+ * TTi (Thurlby Thandar Instruments)
+ */
+#define TTI_VID                        0x103E  /* Vendor Id */
+#define TTI_QL355P_PID         0x03E8  /* TTi QL355P power supply */
+
+/*
+ * Definitions for B&B Electronics products.
+ */
+#define BANDB_VID              0x0856  /* B&B Electronics Vendor ID */
+#define BANDB_USOTL4_PID       0xAC01  /* USOTL4 Isolated RS-485 Converter */
+#define BANDB_USTL4_PID                0xAC02  /* USTL4 RS-485 Converter */
+#define BANDB_USO9ML2_PID      0xAC03  /* USO9ML2 Isolated RS-232 Converter */
+#define BANDB_USOPTL4_PID      0xAC11
+#define BANDB_USPTL4_PID       0xAC12
+#define BANDB_USO9ML2DR_2_PID  0xAC16
+#define BANDB_USO9ML2DR_PID    0xAC17
+#define BANDB_USOPTL4DR2_PID   0xAC18  /* USOPTL4R-2 2-port Isolated RS-232 Converter */
+#define BANDB_USOPTL4DR_PID    0xAC19
+#define BANDB_485USB9F_2W_PID  0xAC25
+#define BANDB_485USB9F_4W_PID  0xAC26
+#define BANDB_232USB9M_PID     0xAC27
+#define BANDB_485USBTB_2W_PID  0xAC33
+#define BANDB_485USBTB_4W_PID  0xAC34
+#define BANDB_TTL5USB9M_PID    0xAC49
+#define BANDB_TTL3USB9M_PID    0xAC50
+#define BANDB_ZZ_PROG1_USB_PID 0xBA02
+
+/*
+ * RM Michaelides CANview USB (http://www.rmcan.com)
+ * CAN fieldbus interface adapter, added by port GmbH www.port.de)
+ * Ian Abbott changed the macro names for consistency.
+ */
+#define FTDI_RM_CANVIEW_PID    0xfd60  /* Product Id */
+
+/*
+ * EVER Eco Pro UPS (http://www.ever.com.pl/)
+ */
+
+#define        EVER_ECO_PRO_CDS        0xe520  /* RS-232 converter */
+
+/*
+ * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
+ * USB-TTY activ, USB-TTY passiv.  Some PIDs are used by several devices
+ * and I'm not entirely sure which are used by which.
+ */
+#define FTDI_4N_GALAXY_DE_1_PID        0xF3C0
+#define FTDI_4N_GALAXY_DE_2_PID        0xF3C1
+
+/*
+ * Mobility Electronics products.
+ */
+#define MOBILITY_VID                   0x1342
+#define MOBILITY_USB_SERIAL_PID                0x0202  /* EasiDock USB 200 serial */
+
+/*
+ * microHAM product IDs (http://www.microham.com).
+ * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>
+ * and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
+ * Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
+ */
+#define FTDI_MHAM_KW_PID 0xEEE8                /* USB-KW interface */
+#define FTDI_MHAM_YS_PID 0xEEE9                /* USB-YS interface */
+#define FTDI_MHAM_Y6_PID 0xEEEA                /* USB-Y6 interface */
+#define FTDI_MHAM_Y8_PID 0xEEEB                /* USB-Y8 interface */
+#define FTDI_MHAM_IC_PID 0xEEEC                /* USB-IC interface */
+#define FTDI_MHAM_DB9_PID 0xEEED       /* USB-DB9 interface */
+#define FTDI_MHAM_RS232_PID 0xEEEE     /* USB-RS232 interface */
+#define FTDI_MHAM_Y9_PID 0xEEEF                /* USB-Y9 interface */
+
+/*
+ * Active Robots product ids.
+ */
+#define FTDI_ACTIVE_ROBOTS_PID 0xE548  /* USB comms board */
+
+/*
+ * Xsens Technologies BV products (http://www.xsens.com).
+ */
+#define XSENS_CONVERTER_0_PID  0xD388
+#define XSENS_CONVERTER_1_PID  0xD389
+#define XSENS_CONVERTER_2_PID  0xD38A
+#define XSENS_CONVERTER_3_PID  0xD38B
+#define XSENS_CONVERTER_4_PID  0xD38C
+#define XSENS_CONVERTER_5_PID  0xD38D
+#define XSENS_CONVERTER_6_PID  0xD38E
+#define XSENS_CONVERTER_7_PID  0xD38F
+
+/*
+ * Teratronik product ids.
+ * Submitted by O. Wölfelschneider.
+ */
+#define FTDI_TERATRONIK_VCP_PID         0xEC88 /* Teratronik device (preferring VCP driver on windows) */
+#define FTDI_TERATRONIK_D2XX_PID 0xEC89        /* Teratronik device (preferring D2XX driver on windows) */
+
+/*
+ * Evolution Robotics products (http://www.evolution.com/).
+ * Submitted by Shawn M. Lavelle.
+ */
+#define EVOLUTION_VID          0xDEEE  /* Vendor ID */
+#define EVOLUTION_ER1_PID      0x0300  /* ER1 Control Module */
+#define EVO_8U232AM_PID        0x02FF  /* Evolution robotics RCM2 (FT232AM)*/
+#define EVO_HYBRID_PID         0x0302  /* Evolution robotics RCM4 PID (FT232BM)*/
+#define EVO_RCM4_PID           0x0303  /* Evolution robotics RCM4 PID */
+
+/* Pyramid Computer GmbH */
+#define FTDI_PYRAMID_PID       0xE6C8  /* Pyramid Appliance Display */
+
+/*
+ * NDI (www.ndigital.com) product ids
+ */
+#define FTDI_NDI_HUC_PID               0xDA70  /* NDI Host USB Converter */
+#define FTDI_NDI_SPECTRA_SCU_PID       0xDA71  /* NDI Spectra SCU */
+#define FTDI_NDI_FUTURE_2_PID          0xDA72  /* NDI future device #2 */
+#define FTDI_NDI_FUTURE_3_PID          0xDA73  /* NDI future device #3 */
+#define FTDI_NDI_AURORA_SCU_PID                0xDA74  /* NDI Aurora SCU */
+
+/*
+ * Posiflex inc retail equipment (http://www.posiflex.com.tw)
+ */
+#define POSIFLEX_VID           0x0d3a  /* Vendor ID */
+#define POSIFLEX_PP7000_PID    0x0300  /* PP-7000II thermal printer */
+
+/*
+ * Westrex International devices submitted by Cory Lee
+ */
+#define FTDI_WESTREX_MODEL_777_PID     0xDC00  /* Model 777 */
+#define FTDI_WESTREX_MODEL_8900F_PID   0xDC01  /* Model 8900F */
+
+/*
+ * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
+ */
+#define FTDI_RRCIRKITS_LOCOBUFFER_PID  0xc7d0  /* LocoBuffer USB */
+
+/*
+ * Eclo (http://www.eclo.pt/) product IDs.
+ * PID 0xEA90 submitted by Martin Grill.
+ */
+#define FTDI_ECLO_COM_1WIRE_PID        0xEA90  /* COM to 1-Wire USB adaptor */
+
+/*
+ * Papouch products (http://www.papouch.com/)
+ * Submitted by Folkert van Heusden
+ */
+
+#define PAPOUCH_VID                    0x5050  /* Vendor ID */
+#define PAPOUCH_TMU_PID                        0x0400  /* TMU USB Thermometer */
+#define PAPOUCH_QUIDO4x4_PID           0x0900  /* Quido 4/4 Module */
+
+/*
+ * ACG Identification Technologies GmbH products (http://www.acg.de/).
+ * Submitted by anton -at- goto10 -dot- org.
  */
+#define FTDI_ACG_HFDUAL_PID            0xDD20  /* HF Dual ISO Reader (RFID) */
+
+/*
+ * Yost Engineering, Inc. products (www.yostengineering.com).
+ * PID 0xE050 submitted by Aaron Prose.
+ */
+#define FTDI_YEI_SERVOCENTER31_PID     0xE050  /* YEI ServoCenter3.1 USB */
+
+/*
+ * ThorLabs USB motor drivers
+ */
+#define FTDI_THORLABS_PID              0xfaf0 /* ThorLabs USB motor drivers */
+
+/*
+ * Testo products (http://www.testo.com/)
+ * Submitted by Colin Leroy
+ */
+#define TESTO_VID                      0x128D
+#define TESTO_USB_INTERFACE_PID                0x0001
+
+/*
+ * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
+ */
+#define FTDI_GAMMA_SCOUT_PID           0xD678  /* Gamma Scout online */
+
+/*
+ * Tactrix OpenPort (ECU) devices.
+ * OpenPort 1.3M submitted by Donour Sizemore.
+ * OpenPort 1.3S and 1.3U submitted by Ian Abbott.
+ */
+#define FTDI_TACTRIX_OPENPORT_13M_PID  0xCC48  /* OpenPort 1.3 Mitsubishi */
+#define FTDI_TACTRIX_OPENPORT_13S_PID  0xCC49  /* OpenPort 1.3 Subaru */
+#define FTDI_TACTRIX_OPENPORT_13U_PID  0xCC4A  /* OpenPort 1.3 Universal */
+
+/*
+ * Telldus Technologies
+ */
+#define TELLDUS_VID                    0x1781  /* Vendor ID */
+#define TELLDUS_TELLSTICK_PID          0x0C30  /* RF control dongle 433 MHz using FT232RL */
+
+/*
+ * IBS elektronik product ids
+ * Submitted by Thomas Schleusener
+ */
+#define FTDI_IBS_US485_PID     0xff38  /* IBS US485 (USB<-->RS422/485 interface) */
+#define FTDI_IBS_PICPRO_PID    0xff39  /* IBS PIC-Programmer */
+#define FTDI_IBS_PCMCIA_PID    0xff3a  /* IBS Card reader for PCMCIA SRAM-cards */
+#define FTDI_IBS_PK1_PID       0xff3b  /* IBS PK1 - Particel counter */
+#define FTDI_IBS_RS232MON_PID  0xff3c  /* IBS RS232 - Monitor */
+#define FTDI_IBS_APP70_PID     0xff3d  /* APP 70 (dust monitoring system) */
+#define FTDI_IBS_PEDO_PID      0xff3e  /* IBS PEDO-Modem (RF modem 868.35 MHz) */
+#define FTDI_IBS_PROD_PID      0xff3f  /* future device */
+
+/*
+ *  MaxStream devices  www.maxstream.net
+ */
+#define FTDI_MAXSTREAM_PID     0xEE18  /* Xbee PKG-U Module */
+
+/* Olimex */
+#define OLIMEX_VID                     0x15BA
+#define OLIMEX_ARM_USB_OCD_PID         0x0003
+
+/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
+/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
+#define LMI_LM3S_DEVEL_BOARD_PID       0xbcd8
+#define LMI_LM3S_EVAL_BOARD_PID                0xbcd9
+
+/* www.elsterelectricity.com Elster Unicom III Optical Probe */
+#define FTDI_ELSTER_UNICOM_PID         0xE700 /* Product Id */
+
+/*
+ * The Mobility Lab (TML)
+ * Submitted by Pierre Castella
+ */
+#define TML_VID                        0x1B91  /* Vendor ID */
+#define TML_USB_SERIAL_PID     0x0064  /* USB - Serial Converter */
+
+/* Propox devices */
+#define FTDI_PROPOX_JTAGCABLEII_PID    0xD738
+
+/* Rig Expert Ukraine devices */
+#define FTDI_REU_TINY_PID              0xED22  /* RigExpert Tiny */
+
+/* Domintell products  http://www.domintell.com */
+#define FTDI_DOMINTELL_DGQG_PID        0xEF50  /* Master */
+#define FTDI_DOMINTELL_DUSB_PID        0xEF51  /* DUSB01 module */
+
+/* Alti-2 products  http://www.alti-2.com */
+#define ALTI2_VID      0x1BC9
+#define ALTI2_N3_PID   0x6001  /* Neptune 3 */
 
 /* Commands */
 #define FTDI_SIO_RESET                 0 /* Reset the port */
 #define INTERFACE_C            3
 #define INTERFACE_D            4
 
+/*
+ * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
+ * Submitted by Harald Welte <laforge@openmoko.org>
+ */
+#define        FIC_VID                 0x1457
+#define        FIC_NEO1973_DEBUG_PID   0x5118
+
+/*
+ * RATOC REX-USB60F
+ */
+#define RATOC_VENDOR_ID                0x0584
+#define RATOC_PRODUCT_ID_USB60F        0xb020
+
+/*
+ * DIEBOLD BCS SE923
+ */
+#define DIEBOLD_BCS_SE923_PID  0xfb99
+
+/*
+ * Atmel STK541
+ */
+#define ATMEL_VID              0x03eb /* Vendor ID */
+#define STK541_PID             0x2109 /* Zigbee Controller */
+
+/*
+ * Dresden Elektronic Sensor Terminal Board
+ */
+#define DE_VID                 0x1cf1 /* Vendor ID */
+#define STB_PID                        0x0001 /* Sensor Terminal Board */
+#define WHT_PID                        0x0004 /* Wireless Handheld Terminal */
+
+/*
+ * Blackfin gnICE JTAG
+ * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
+ */
+#define ADI_VID                0x0456
+#define ADI_GNICE_PID          0xF000
+#define ADI_GNICEPLUS_PID      0xF001
+
+/*
+ * JETI SPECTROMETER SPECBOS 1201
+ * http://www.jeti.com/products/sys/scb/scb1201.php
+ */
+#define JETI_VID               0x0c6c
+#define JETI_SPC1201_PID       0x04b2
+
+/*
+ * Marvell SheevaPlug
+ */
+#define MARVELL_VID            0x9e88
+#define MARVELL_SHEEVAPLUG_PID 0x9e8f
+
+#define FTDI_TURTELIZER_PID    0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
+
+/*
+ * GN Otometrics (http://www.otometrics.com)
+ * Submitted by Ville Sundberg.
+ */
+#define GN_OTOMETRICS_VID      0x0c33  /* Vendor ID */
+#define AURICAL_USB_PID                0x0010  /* Aurical USB Audiometer */
+
+/*
+ * Bayer Ascensia Contour blood glucose meter USB-converter cable.
+ * http://winglucofacts.com/cables/
+ */
+#define BAYER_VID                      0x1A79
+#define BAYER_CONTOUR_CABLE_PID        0x6001
+
+/*
+ * Marvell OpenRD Base, Client
+ * http://www.open-rd.org
+ * OpenRD Base, Client use VID 0x0403
+ */
+#define MARVELL_OPENRD_PID     0x9e90
+
+/*
+ * Hameg HO820 and HO870 interface (using VID 0x0403)
+ */
+#define        HAMEG_HO820_PID         0xed74
+#define        HAMEG_HO870_PID         0xed71
 
 /*
  *   BmRequestType:  1100 0000b
@@ -554,3 +1504,4 @@ typedef enum {
  * B2..7       Length of message - (not including Byte 0)
  *
  */
+
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
deleted file mode 100644 (file)
index 54d8fd1..0000000
+++ /dev/null
@@ -1,1120 +0,0 @@
-/*
- * vendor/product IDs (VID/PID) of devices using FTDI USB serial converters.
- * Please keep numerically sorted within individual areas, thanks!
- *
- * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
- * from Rudolf Gugler
- *
- */
-
-
-/**********************************/
-/***** devices using FTDI VID *****/
-/**********************************/
-
-
-#define FTDI_VID       0x0403  /* Vendor Id */
-
-
-/*** "original" FTDI device PIDs ***/
-
-#define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */
-#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
-#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
-#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
-#define FTDI_SIO_PID   0x8372  /* Product Id SIO application of 8U100AX  */
-#define FTDI_232RL_PID  0xFBFA  /* Product ID for FT232RL */
-
-
-/*** third-party PIDs (using FTDI_VID) ***/
-
-/*
- * Marvell OpenRD Base, Client
- * http://www.open-rd.org
- * OpenRD Base, Client use VID 0x0403
- */
-#define MARVELL_OPENRD_PID     0x9e90
-
-/* www.candapter.com Ewert Energy Systems CANdapter device */
-#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
-
-#define FTDI_NXTCAM_PID                0xABB8 /* NXTCam for Mindstorms NXT */
-
-/* US Interface Navigator (http://www.usinterface.com/) */
-#define FTDI_USINT_CAT_PID     0xb810  /* Navigator CAT and 2nd PTT lines */
-#define FTDI_USINT_WKEY_PID    0xb811  /* Navigator WKEY and FSK lines */
-#define FTDI_USINT_RS232_PID   0xb812  /* Navigator RS232 and CONFIG lines */
-
-/* OOCDlink by Joern Kaipf <joernk@web.de>
- * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
-#define FTDI_OOCDLINK_PID      0xbaf8  /* Amontec JTAGkey */
-
-/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
-/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
-#define LMI_LM3S_DEVEL_BOARD_PID       0xbcd8
-#define LMI_LM3S_EVAL_BOARD_PID                0xbcd9
-
-#define FTDI_TURTELIZER_PID    0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
-
-/* OpenDCC (www.opendcc.de) product id */
-#define FTDI_OPENDCC_PID       0xBFD8
-#define FTDI_OPENDCC_SNIFFER_PID       0xBFD9
-#define FTDI_OPENDCC_THROTTLE_PID      0xBFDA
-#define FTDI_OPENDCC_GATEWAY_PID       0xBFDB
-#define FTDI_OPENDCC_GBM_PID   0xBFDC
-
-/*
- * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
- */
-#define FTDI_RRCIRKITS_LOCOBUFFER_PID  0xc7d0  /* LocoBuffer USB */
-
-/* DMX4ALL DMX Interfaces */
-#define FTDI_DMX4ALL 0xC850
-
-/*
- * ASK.fr devices
- */
-#define FTDI_ASK_RDR400_PID    0xC991  /* ASK RDR 400 series card reader */
-
-/* www.starting-point-systems.com µChameleon device */
-#define FTDI_MICRO_CHAMELEON_PID       0xCAA0  /* Product Id */
-
-/*
- * Tactrix OpenPort (ECU) devices.
- * OpenPort 1.3M submitted by Donour Sizemore.
- * OpenPort 1.3S and 1.3U submitted by Ian Abbott.
- */
-#define FTDI_TACTRIX_OPENPORT_13M_PID  0xCC48  /* OpenPort 1.3 Mitsubishi */
-#define FTDI_TACTRIX_OPENPORT_13S_PID  0xCC49  /* OpenPort 1.3 Subaru */
-#define FTDI_TACTRIX_OPENPORT_13U_PID  0xCC4A  /* OpenPort 1.3 Universal */
-
-/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
-/* the VID is the standard ftdi vid (FTDI_VID) */
-#define FTDI_SCS_DEVICE_0_PID 0xD010    /* SCS PTC-IIusb */
-#define FTDI_SCS_DEVICE_1_PID 0xD011    /* SCS Tracker / DSP TNC */
-#define FTDI_SCS_DEVICE_2_PID 0xD012
-#define FTDI_SCS_DEVICE_3_PID 0xD013
-#define FTDI_SCS_DEVICE_4_PID 0xD014
-#define FTDI_SCS_DEVICE_5_PID 0xD015
-#define FTDI_SCS_DEVICE_6_PID 0xD016
-#define FTDI_SCS_DEVICE_7_PID 0xD017
-
-/* iPlus device */
-#define FTDI_IPLUS_PID 0xD070 /* Product Id */
-#define FTDI_IPLUS2_PID 0xD071 /* Product Id */
-
-/*
- * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
- */
-#define FTDI_GAMMA_SCOUT_PID           0xD678  /* Gamma Scout online */
-
-/* Propox devices */
-#define FTDI_PROPOX_JTAGCABLEII_PID    0xD738
-
-/* Lenz LI-USB Computer Interface. */
-#define FTDI_LENZ_LIUSB_PID    0xD780
-
-/* Vardaan Enterprises Serial Interface VEUSB422R3 */
-#define FTDI_VARDAAN_PID       0xF070
-
-/*
- * Xsens Technologies BV products (http://www.xsens.com).
- */
-#define XSENS_CONVERTER_0_PID  0xD388
-#define XSENS_CONVERTER_1_PID  0xD389
-#define XSENS_CONVERTER_2_PID  0xD38A
-#define XSENS_CONVERTER_3_PID  0xD38B
-#define XSENS_CONVERTER_4_PID  0xD38C
-#define XSENS_CONVERTER_5_PID  0xD38D
-#define XSENS_CONVERTER_6_PID  0xD38E
-#define XSENS_CONVERTER_7_PID  0xD38F
-
-/*
- * NDI (www.ndigital.com) product ids
- */
-#define FTDI_NDI_HUC_PID               0xDA70  /* NDI Host USB Converter */
-#define FTDI_NDI_SPECTRA_SCU_PID       0xDA71  /* NDI Spectra SCU */
-#define FTDI_NDI_FUTURE_2_PID          0xDA72  /* NDI future device #2 */
-#define FTDI_NDI_FUTURE_3_PID          0xDA73  /* NDI future device #3 */
-#define FTDI_NDI_AURORA_SCU_PID                0xDA74  /* NDI Aurora SCU */
-
-/*
- * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
- */
-#define FTDI_CHAMSYS_24_MASTER_WING_PID        0xDAF8
-#define FTDI_CHAMSYS_PC_WING_PID       0xDAF9
-#define FTDI_CHAMSYS_USB_DMX_PID       0xDAFA
-#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
-#define FTDI_CHAMSYS_MINI_WING_PID     0xDAFC
-#define FTDI_CHAMSYS_MAXI_WING_PID     0xDAFD
-#define FTDI_CHAMSYS_MEDIA_WING_PID    0xDAFE
-#define FTDI_CHAMSYS_WING_PID  0xDAFF
-
-/*
- * Westrex International devices submitted by Cory Lee
- */
-#define FTDI_WESTREX_MODEL_777_PID     0xDC00  /* Model 777 */
-#define FTDI_WESTREX_MODEL_8900F_PID   0xDC01  /* Model 8900F */
-
-/*
- * ACG Identification Technologies GmbH products (http://www.acg.de/).
- * Submitted by anton -at- goto10 -dot- org.
- */
-#define FTDI_ACG_HFDUAL_PID            0xDD20  /* HF Dual ISO Reader (RFID) */
-
-/*
- * Definitions for Artemis astronomical USB based cameras
- * Check it at http://www.artemisccd.co.uk/
- */
-#define FTDI_ARTEMIS_PID       0xDF28  /* All Artemis Cameras */
-
-/*
- * Definitions for ATIK Instruments astronomical USB based cameras
- * Check it at http://www.atik-instruments.com/
- */
-#define FTDI_ATIK_ATK16_PID    0xDF30  /* ATIK ATK-16 Grayscale Camera */
-#define FTDI_ATIK_ATK16C_PID   0xDF32  /* ATIK ATK-16C Colour Camera */
-#define FTDI_ATIK_ATK16HR_PID  0xDF31  /* ATIK ATK-16HR Grayscale Camera */
-#define FTDI_ATIK_ATK16HRC_PID 0xDF33  /* ATIK ATK-16HRC Colour Camera */
-#define FTDI_ATIK_ATK16IC_PID   0xDF35  /* ATIK ATK-16IC Grayscale Camera */
-
-/*
- * Yost Engineering, Inc. products (www.yostengineering.com).
- * PID 0xE050 submitted by Aaron Prose.
- */
-#define FTDI_YEI_SERVOCENTER31_PID     0xE050  /* YEI ServoCenter3.1 USB */
-
-/*
- * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
- * All of these devices use FTDI's vendor ID (0x0403).
- * Further IDs taken from ELV Windows .inf file.
- *
- * The previously included PID for the UO 100 module was incorrect.
- * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
- *
- * Armin Laeuger originally sent the PID for the UM 100 module.
- */
-#define FTDI_ELV_USR_PID       0xE000  /* ELV Universal-Sound-Recorder */
-#define FTDI_ELV_MSM1_PID      0xE001  /* ELV Mini-Sound-Modul */
-#define FTDI_ELV_KL100_PID     0xE002  /* ELV Kfz-Leistungsmesser KL 100 */
-#define FTDI_ELV_WS550_PID     0xE004  /* WS 550 */
-#define FTDI_ELV_EC3000_PID    0xE006  /* ENERGY CONTROL 3000 USB */
-#define FTDI_ELV_WS888_PID     0xE008  /* WS 888 */
-#define FTDI_ELV_TWS550_PID    0xE009  /* Technoline WS 550 */
-#define FTDI_ELV_FEM_PID       0xE00A  /* Funk Energie Monitor */
-#define FTDI_ELV_FHZ1300PC_PID 0xE0E8  /* FHZ 1300 PC */
-#define FTDI_ELV_WS500_PID     0xE0E9  /* PC-Wetterstation (WS 500) */
-#define FTDI_ELV_HS485_PID     0xE0EA  /* USB to RS-485 adapter */
-#define FTDI_ELV_UMS100_PID    0xE0EB  /* ELV USB Master-Slave Schaltsteckdose UMS 100 */
-#define FTDI_ELV_TFD128_PID    0xE0EC  /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
-#define FTDI_ELV_FM3RX_PID     0xE0ED  /* ELV Messwertuebertragung FM3 RX */
-#define FTDI_ELV_WS777_PID     0xE0EE  /* Conrad WS 777 */
-#define FTDI_ELV_EM1010PC_PID  0xE0EF  /* Engery monitor EM 1010 PC */
-#define FTDI_ELV_CSI8_PID      0xE0F0  /* Computer-Schalt-Interface (CSI 8) */
-#define FTDI_ELV_EM1000DL_PID  0xE0F1  /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
-#define FTDI_ELV_PCK100_PID    0xE0F2  /* PC-Kabeltester (PCK 100) */
-#define FTDI_ELV_RFP500_PID    0xE0F3  /* HF-Leistungsmesser (RFP 500) */
-#define FTDI_ELV_FS20SIG_PID   0xE0F4  /* Signalgeber (FS 20 SIG) */
-#define FTDI_ELV_UTP8_PID      0xE0F5  /* ELV UTP 8 */
-#define FTDI_ELV_WS300PC_PID   0xE0F6  /* PC-Wetterstation (WS 300 PC) */
-#define FTDI_ELV_WS444PC_PID   0xE0F7  /* Conrad WS 444 PC */
-#define FTDI_PHI_FISCO_PID      0xE40B  /* PHI Fisco USB to Serial cable */
-#define FTDI_ELV_UAD8_PID      0xF068  /* USB-AD-Wandler (UAD 8) */
-#define FTDI_ELV_UDA7_PID      0xF069  /* USB-DA-Wandler (UDA 7) */
-#define FTDI_ELV_USI2_PID      0xF06A  /* USB-Schrittmotoren-Interface (USI 2) */
-#define FTDI_ELV_T1100_PID     0xF06B  /* Thermometer (T 1100) */
-#define FTDI_ELV_PCD200_PID    0xF06C  /* PC-Datenlogger (PCD 200) */
-#define FTDI_ELV_ULA200_PID    0xF06D  /* USB-LCD-Ansteuerung (ULA 200) */
-#define FTDI_ELV_ALC8500_PID   0xF06E  /* ALC 8500 Expert */
-#define FTDI_ELV_FHZ1000PC_PID 0xF06F  /* FHZ 1000 PC */
-#define FTDI_ELV_UR100_PID     0xFB58  /* USB-RS232-Umsetzer (UR 100) */
-#define FTDI_ELV_UM100_PID     0xFB5A  /* USB-Modul UM 100 */
-#define FTDI_ELV_UO100_PID     0xFB5B  /* USB-Modul UO 100 */
-/* Additional ELV PIDs that default to using the FTDI D2XX drivers on
- * MS Windows, rather than the FTDI Virtual Com Port drivers.
- * Maybe these will be easier to use with the libftdi/libusb user-space
- * drivers, or possibly the Comedi drivers in some cases. */
-#define FTDI_ELV_CLI7000_PID   0xFB59  /* Computer-Light-Interface (CLI 7000) */
-#define FTDI_ELV_PPS7330_PID   0xFB5C  /* Processor-Power-Supply (PPS 7330) */
-#define FTDI_ELV_TFM100_PID    0xFB5D  /* Temperartur-Feuchte Messgeraet (TFM 100) */
-#define FTDI_ELV_UDF77_PID     0xFB5E  /* USB DCF Funkurh (UDF 77) */
-#define FTDI_ELV_UIO88_PID     0xFB5F  /* USB-I/O Interface (UIO 88) */
-
-/*
- * EVER Eco Pro UPS (http://www.ever.com.pl/)
- */
-
-#define        EVER_ECO_PRO_CDS        0xe520  /* RS-232 converter */
-
-/*
- * Active Robots product ids.
- */
-#define FTDI_ACTIVE_ROBOTS_PID 0xE548  /* USB comms board */
-
-/* Pyramid Computer GmbH */
-#define FTDI_PYRAMID_PID       0xE6C8  /* Pyramid Appliance Display */
-
-/* www.elsterelectricity.com Elster Unicom III Optical Probe */
-#define FTDI_ELSTER_UNICOM_PID         0xE700 /* Product Id */
-
-/*
- * Gude Analog- und Digitalsysteme GmbH
- */
-#define FTDI_GUDEADS_E808_PID    0xE808
-#define FTDI_GUDEADS_E809_PID    0xE809
-#define FTDI_GUDEADS_E80A_PID    0xE80A
-#define FTDI_GUDEADS_E80B_PID    0xE80B
-#define FTDI_GUDEADS_E80C_PID    0xE80C
-#define FTDI_GUDEADS_E80D_PID    0xE80D
-#define FTDI_GUDEADS_E80E_PID    0xE80E
-#define FTDI_GUDEADS_E80F_PID    0xE80F
-#define FTDI_GUDEADS_E888_PID    0xE888  /* Expert ISDN Control USB */
-#define FTDI_GUDEADS_E889_PID    0xE889  /* USB RS-232 OptoBridge */
-#define FTDI_GUDEADS_E88A_PID    0xE88A
-#define FTDI_GUDEADS_E88B_PID    0xE88B
-#define FTDI_GUDEADS_E88C_PID    0xE88C
-#define FTDI_GUDEADS_E88D_PID    0xE88D
-#define FTDI_GUDEADS_E88E_PID    0xE88E
-#define FTDI_GUDEADS_E88F_PID    0xE88F
-
-/*
- * Eclo (http://www.eclo.pt/) product IDs.
- * PID 0xEA90 submitted by Martin Grill.
- */
-#define FTDI_ECLO_COM_1WIRE_PID        0xEA90  /* COM to 1-Wire USB adaptor */
-
-/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */
-#define FTDI_TNC_X_PID         0xEBE0
-
-/*
- * Teratronik product ids.
- * Submitted by O. Wölfelschneider.
- */
-#define FTDI_TERATRONIK_VCP_PID         0xEC88 /* Teratronik device (preferring VCP driver on windows) */
-#define FTDI_TERATRONIK_D2XX_PID 0xEC89        /* Teratronik device (preferring D2XX driver on windows) */
-
-/* Rig Expert Ukraine devices */
-#define FTDI_REU_TINY_PID              0xED22  /* RigExpert Tiny */
-
-/*
- * Hameg HO820 and HO870 interface (using VID 0x0403)
- */
-#define        HAMEG_HO820_PID         0xed74
-#define        HAMEG_HO870_PID         0xed71
-
-/*
- *  MaxStream devices  www.maxstream.net
- */
-#define FTDI_MAXSTREAM_PID     0xEE18  /* Xbee PKG-U Module */
-
-/*
- * microHAM product IDs (http://www.microham.com).
- * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>
- * and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
- * Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
- */
-#define FTDI_MHAM_KW_PID 0xEEE8                /* USB-KW interface */
-#define FTDI_MHAM_YS_PID 0xEEE9                /* USB-YS interface */
-#define FTDI_MHAM_Y6_PID 0xEEEA                /* USB-Y6 interface */
-#define FTDI_MHAM_Y8_PID 0xEEEB                /* USB-Y8 interface */
-#define FTDI_MHAM_IC_PID 0xEEEC                /* USB-IC interface */
-#define FTDI_MHAM_DB9_PID 0xEEED       /* USB-DB9 interface */
-#define FTDI_MHAM_RS232_PID 0xEEEE     /* USB-RS232 interface */
-#define FTDI_MHAM_Y9_PID 0xEEEF                /* USB-Y9 interface */
-
-/* Domintell products  http://www.domintell.com */
-#define FTDI_DOMINTELL_DGQG_PID        0xEF50  /* Master */
-#define FTDI_DOMINTELL_DUSB_PID        0xEF51  /* DUSB01 module */
-
-/*
- * The following are the values for the Perle Systems
- * UltraPort USB serial converters
- */
-#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0        /* Perle UltraPort Product Id */
-
-/* Sprog II (Andrew Crosland's SprogII DCC interface) */
-#define FTDI_SPROG_II          0xF0C8
-
-/* an infrared receiver for user access control with IR tags */
-#define FTDI_PIEGROUP_PID      0xF208  /* Product Id */
-
-/* ACT Solutions HomePro ZWave interface
-   (http://www.act-solutions.com/HomePro.htm) */
-#define FTDI_ACTZWAVE_PID      0xF2D0
-
-/*
- * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
- * USB-TTY activ, USB-TTY passiv.  Some PIDs are used by several devices
- * and I'm not entirely sure which are used by which.
- */
-#define FTDI_4N_GALAXY_DE_1_PID        0xF3C0
-#define FTDI_4N_GALAXY_DE_2_PID        0xF3C1
-
-/*
- * Linx Technologies product ids
- */
-#define LINX_SDMUSBQSS_PID     0xF448  /* Linx SDM-USB-QS-S */
-#define LINX_MASTERDEVEL2_PID   0xF449   /* Linx Master Development 2.0 */
-#define LINX_FUTURE_0_PID   0xF44A   /* Linx future device */
-#define LINX_FUTURE_1_PID   0xF44B   /* Linx future device */
-#define LINX_FUTURE_2_PID   0xF44C   /* Linx future device */
-
-/*
- * Oceanic product ids
- */
-#define FTDI_OCEANIC_PID       0xF460  /* Oceanic dive instrument */
-
-/*
- * SUUNTO product ids
- */
-#define FTDI_SUUNTO_SPORTS_PID 0xF680  /* Suunto Sports instrument */
-
-/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */
-/* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */
-#define FTDI_USB_UIRT_PID      0xF850  /* Product Id */
-
-/* CCS Inc. ICDU/ICDU40 product ID -
- * the FT232BM is used in an in-circuit-debugger unit for PIC16's/PIC18's */
-#define FTDI_CCSICDU20_0_PID    0xF9D0
-#define FTDI_CCSICDU40_1_PID    0xF9D1
-#define FTDI_CCSMACHX_2_PID     0xF9D2
-#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
-#define FTDI_CCSICDU64_4_PID    0xF9D4
-#define FTDI_CCSPRIME8_5_PID    0xF9D5
-
-/*
- * The following are the values for the Matrix Orbital LCD displays,
- * which are the FT232BM ( similar to the 8U232AM )
- */
-#define FTDI_MTXORB_0_PID      0xFA00  /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_1_PID      0xFA01  /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_2_PID      0xFA02  /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_3_PID      0xFA03  /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_4_PID      0xFA04  /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_5_PID      0xFA05  /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_6_PID      0xFA06  /* Matrix Orbital Product Id */
-
-/*
- * Home Electronics (www.home-electro.com) USB gadgets
- */
-#define FTDI_HE_TIRA1_PID      0xFA78  /* Tira-1 IR transceiver */
-
-/* Inside Accesso contactless reader (http://www.insidefr.com) */
-#define INSIDE_ACCESSO         0xFAD0
-
-/*
- * ThorLabs USB motor drivers
- */
-#define FTDI_THORLABS_PID              0xfaf0 /* ThorLabs USB motor drivers */
-
-/*
- * Protego product ids
- */
-#define PROTEGO_SPECIAL_1      0xFC70  /* special/unknown device */
-#define PROTEGO_R2X0           0xFC71  /* R200-USB TRNG unit (R210, R220, and R230) */
-#define PROTEGO_SPECIAL_3      0xFC72  /* special/unknown device */
-#define PROTEGO_SPECIAL_4      0xFC73  /* special/unknown device */
-
-/*
- * DSS-20 Sync Station for Sony Ericsson P800
- */
-#define FTDI_DSS20_PID          0xFC82
-
-/* www.irtrans.de device */
-#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
-
-/*
- * RM Michaelides CANview USB (http://www.rmcan.com) (FTDI_VID)
- * CAN fieldbus interface adapter, added by port GmbH www.port.de)
- * Ian Abbott changed the macro names for consistency.
- */
-#define FTDI_RM_CANVIEW_PID    0xfd60  /* Product Id */
-/* www.thoughttechnology.com/ TT-USB provide with procomp use ftdi_sio */
-#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
-
-#define FTDI_USBX_707_PID 0xF857       /* ADSTech IR Blaster USBX-707 (FTDI_VID) */
-
-#define FTDI_RELAIS_PID        0xFA10  /* Relais device from Rudolf Gugler */
-
-/*
- * PCDJ use ftdi based dj-controllers. The following PID is
- * for their DAC-2 device http://www.pcdjhardware.com/DAC2.asp
- * (the VID is the standard ftdi vid (FTDI_VID), PID sent by Wouter Paesen)
- */
-#define FTDI_PCDJ_DAC2_PID 0xFA88
-
-#define FTDI_R2000KU_TRUE_RNG  0xFB80  /* R2000KU TRUE RNG (FTDI_VID) */
-
-/*
- * DIEBOLD BCS SE923 (FTDI_VID)
- */
-#define DIEBOLD_BCS_SE923_PID  0xfb99
-
-/* www.crystalfontz.com devices
- * - thanx for providing free devices for evaluation !
- * they use the ftdi chipset for the USB interface
- * and the vendor id is the same
- */
-#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
-#define FTDI_XF_634_PID 0xFC09 /* 634: 20x4 Character Display */
-#define FTDI_XF_547_PID 0xFC0A /* 547: Two line Display */
-#define FTDI_XF_633_PID 0xFC0B /* 633: 16x2 Character Display with Keys */
-#define FTDI_XF_631_PID 0xFC0C /* 631: 20x2 Character Display */
-#define FTDI_XF_635_PID 0xFC0D /* 635: 20x4 Character Display */
-#define FTDI_XF_640_PID 0xFC0E /* 640: Two line Display */
-#define FTDI_XF_642_PID 0xFC0F /* 642: Two line Display */
-
-/*
- * Video Networks Limited / Homechoice in the UK use an ftdi-based device
- * for their 1Mb broadband internet service.  The following PID is exhibited
- * by the usb device supplied (the VID is the standard ftdi vid (FTDI_VID)
- */
-#define FTDI_VNHCPCUSB_D_PID 0xfe38 /* Product Id */
-
-/* AlphaMicro Components AMC-232USB01 device (FTDI_VID) */
-#define FTDI_AMC232_PID 0xFF00 /* Product Id */
-
-/*
- * IBS elektronik product ids (FTDI_VID)
- * Submitted by Thomas Schleusener
- */
-#define FTDI_IBS_US485_PID     0xff38  /* IBS US485 (USB<-->RS422/485 interface) */
-#define FTDI_IBS_PICPRO_PID    0xff39  /* IBS PIC-Programmer */
-#define FTDI_IBS_PCMCIA_PID    0xff3a  /* IBS Card reader for PCMCIA SRAM-cards */
-#define FTDI_IBS_PK1_PID       0xff3b  /* IBS PK1 - Particel counter */
-#define FTDI_IBS_RS232MON_PID  0xff3c  /* IBS RS232 - Monitor */
-#define FTDI_IBS_APP70_PID     0xff3d  /* APP 70 (dust monitoring system) */
-#define FTDI_IBS_PEDO_PID      0xff3e  /* IBS PEDO-Modem (RF modem 868.35 MHz) */
-#define FTDI_IBS_PROD_PID      0xff3f  /* future device */
-/* www.canusb.com Lawicel CANUSB device (FTDI_VID) */
-#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
-
-
-
-/********************************/
-/** third-party VID/PID combos **/
-/********************************/
-
-
-
-/*
- * Atmel STK541
- */
-#define ATMEL_VID              0x03eb /* Vendor ID */
-#define STK541_PID             0x2109 /* Zigbee Controller */
-
-/*
- * Blackfin gnICE JTAG
- * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
- */
-#define ADI_VID                0x0456
-#define ADI_GNICE_PID          0xF000
-#define ADI_GNICEPLUS_PID      0xF001
-
-/*
- * RATOC REX-USB60F
- */
-#define RATOC_VENDOR_ID                0x0584
-#define RATOC_PRODUCT_ID_USB60F        0xb020
-
-/*
- * Contec products (http://www.contec.com)
- * Submitted by Daniel Sangorrin
- */
-#define CONTEC_VID             0x06CE  /* Vendor ID */
-#define CONTEC_COM1USBH_PID    0x8311  /* COM-1(USB)H */
-
-/*
- * Contec products (http://www.contec.com)
- * Submitted by Daniel Sangorrin
- */
-#define CONTEC_VID             0x06CE  /* Vendor ID */
-#define CONTEC_COM1USBH_PID    0x8311  /* COM-1(USB)H */
-
-/*
- * Definitions for B&B Electronics products.
- */
-#define BANDB_VID              0x0856  /* B&B Electronics Vendor ID */
-#define BANDB_USOTL4_PID       0xAC01  /* USOTL4 Isolated RS-485 Converter */
-#define BANDB_USTL4_PID                0xAC02  /* USTL4 RS-485 Converter */
-#define BANDB_USO9ML2_PID      0xAC03  /* USO9ML2 Isolated RS-232 Converter */
-#define BANDB_USOPTL4_PID      0xAC11
-#define BANDB_USPTL4_PID       0xAC12
-#define BANDB_USO9ML2DR_2_PID  0xAC16
-#define BANDB_USO9ML2DR_PID    0xAC17
-#define BANDB_USOPTL4DR2_PID   0xAC18  /* USOPTL4R-2 2-port Isolated RS-232 Converter */
-#define BANDB_USOPTL4DR_PID    0xAC19
-#define BANDB_485USB9F_2W_PID  0xAC25
-#define BANDB_485USB9F_4W_PID  0xAC26
-#define BANDB_232USB9M_PID     0xAC27
-#define BANDB_485USBTB_2W_PID  0xAC33
-#define BANDB_485USBTB_4W_PID  0xAC34
-#define BANDB_TTL5USB9M_PID    0xAC49
-#define BANDB_TTL3USB9M_PID    0xAC50
-#define BANDB_ZZ_PROG1_USB_PID 0xBA02
-
-/*
- * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
- */
-#define INTREPID_VID           0x093C
-#define INTREPID_VALUECAN_PID  0x0601
-#define INTREPID_NEOVI_PID     0x0701
-
-/*
- * Definitions for ID TECH (www.idt-net.com) devices
- */
-#define IDTECH_VID             0x0ACD  /* ID TECH Vendor ID */
-#define IDTECH_IDT1221U_PID    0x0300  /* IDT1221U USB to RS-232 adapter */
-
-/*
- * Definitions for Omnidirectional Control Technology, Inc. devices
- */
-#define OCT_VID                        0x0B39  /* OCT vendor ID */
-/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
-/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
-/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
-#define OCT_US101_PID          0x0421  /* OCT US101 USB to RS-232 */
-
-/*
- * Icom ID-1 digital transceiver
- */
-
-#define ICOM_ID1_VID            0x0C26
-#define ICOM_ID1_PID            0x0004
-
-/*
- * GN Otometrics (http://www.otometrics.com)
- * Submitted by Ville Sundberg.
- */
-#define GN_OTOMETRICS_VID      0x0c33  /* Vendor ID */
-#define AURICAL_USB_PID                0x0010  /* Aurical USB Audiometer */
-
-/*
- * The following are the values for the Sealevel SeaLINK+ adapters.
- * (Original list sent by Tuan Hoang.  Ian Abbott renamed the macros and
- * removed some PIDs that don't seem to match any existing products.)
- */
-#define SEALEVEL_VID           0x0c52  /* Sealevel Vendor ID */
-#define SEALEVEL_2101_PID      0x2101  /* SeaLINK+232 (2101/2105) */
-#define SEALEVEL_2102_PID      0x2102  /* SeaLINK+485 (2102) */
-#define SEALEVEL_2103_PID      0x2103  /* SeaLINK+232I (2103) */
-#define SEALEVEL_2104_PID      0x2104  /* SeaLINK+485I (2104) */
-#define SEALEVEL_2106_PID      0x9020  /* SeaLINK+422 (2106) */
-#define SEALEVEL_2201_1_PID    0x2211  /* SeaPORT+2/232 (2201) Port 1 */
-#define SEALEVEL_2201_2_PID    0x2221  /* SeaPORT+2/232 (2201) Port 2 */
-#define SEALEVEL_2202_1_PID    0x2212  /* SeaPORT+2/485 (2202) Port 1 */
-#define SEALEVEL_2202_2_PID    0x2222  /* SeaPORT+2/485 (2202) Port 2 */
-#define SEALEVEL_2203_1_PID    0x2213  /* SeaPORT+2 (2203) Port 1 */
-#define SEALEVEL_2203_2_PID    0x2223  /* SeaPORT+2 (2203) Port 2 */
-#define SEALEVEL_2401_1_PID    0x2411  /* SeaPORT+4/232 (2401) Port 1 */
-#define SEALEVEL_2401_2_PID    0x2421  /* SeaPORT+4/232 (2401) Port 2 */
-#define SEALEVEL_2401_3_PID    0x2431  /* SeaPORT+4/232 (2401) Port 3 */
-#define SEALEVEL_2401_4_PID    0x2441  /* SeaPORT+4/232 (2401) Port 4 */
-#define SEALEVEL_2402_1_PID    0x2412  /* SeaPORT+4/485 (2402) Port 1 */
-#define SEALEVEL_2402_2_PID    0x2422  /* SeaPORT+4/485 (2402) Port 2 */
-#define SEALEVEL_2402_3_PID    0x2432  /* SeaPORT+4/485 (2402) Port 3 */
-#define SEALEVEL_2402_4_PID    0x2442  /* SeaPORT+4/485 (2402) Port 4 */
-#define SEALEVEL_2403_1_PID    0x2413  /* SeaPORT+4 (2403) Port 1 */
-#define SEALEVEL_2403_2_PID    0x2423  /* SeaPORT+4 (2403) Port 2 */
-#define SEALEVEL_2403_3_PID    0x2433  /* SeaPORT+4 (2403) Port 3 */
-#define SEALEVEL_2403_4_PID    0x2443  /* SeaPORT+4 (2403) Port 4 */
-#define SEALEVEL_2801_1_PID    0X2811  /* SeaLINK+8/232 (2801) Port 1 */
-#define SEALEVEL_2801_2_PID    0X2821  /* SeaLINK+8/232 (2801) Port 2 */
-#define SEALEVEL_2801_3_PID    0X2831  /* SeaLINK+8/232 (2801) Port 3 */
-#define SEALEVEL_2801_4_PID    0X2841  /* SeaLINK+8/232 (2801) Port 4 */
-#define SEALEVEL_2801_5_PID    0X2851  /* SeaLINK+8/232 (2801) Port 5 */
-#define SEALEVEL_2801_6_PID    0X2861  /* SeaLINK+8/232 (2801) Port 6 */
-#define SEALEVEL_2801_7_PID    0X2871  /* SeaLINK+8/232 (2801) Port 7 */
-#define SEALEVEL_2801_8_PID    0X2881  /* SeaLINK+8/232 (2801) Port 8 */
-#define SEALEVEL_2802_1_PID    0X2812  /* SeaLINK+8/485 (2802) Port 1 */
-#define SEALEVEL_2802_2_PID    0X2822  /* SeaLINK+8/485 (2802) Port 2 */
-#define SEALEVEL_2802_3_PID    0X2832  /* SeaLINK+8/485 (2802) Port 3 */
-#define SEALEVEL_2802_4_PID    0X2842  /* SeaLINK+8/485 (2802) Port 4 */
-#define SEALEVEL_2802_5_PID    0X2852  /* SeaLINK+8/485 (2802) Port 5 */
-#define SEALEVEL_2802_6_PID    0X2862  /* SeaLINK+8/485 (2802) Port 6 */
-#define SEALEVEL_2802_7_PID    0X2872  /* SeaLINK+8/485 (2802) Port 7 */
-#define SEALEVEL_2802_8_PID    0X2882  /* SeaLINK+8/485 (2802) Port 8 */
-#define SEALEVEL_2803_1_PID    0X2813  /* SeaLINK+8 (2803) Port 1 */
-#define SEALEVEL_2803_2_PID    0X2823  /* SeaLINK+8 (2803) Port 2 */
-#define SEALEVEL_2803_3_PID    0X2833  /* SeaLINK+8 (2803) Port 3 */
-#define SEALEVEL_2803_4_PID    0X2843  /* SeaLINK+8 (2803) Port 4 */
-#define SEALEVEL_2803_5_PID    0X2853  /* SeaLINK+8 (2803) Port 5 */
-#define SEALEVEL_2803_6_PID    0X2863  /* SeaLINK+8 (2803) Port 6 */
-#define SEALEVEL_2803_7_PID    0X2873  /* SeaLINK+8 (2803) Port 7 */
-#define SEALEVEL_2803_8_PID    0X2883  /* SeaLINK+8 (2803) Port 8 */
-
-/*
- * JETI SPECTROMETER SPECBOS 1201
- * http://www.jeti.com/products/sys/scb/scb1201.php
- */
-#define JETI_VID               0x0c6c
-#define JETI_SPC1201_PID       0x04b2
-
-/*
- * FTDI USB UART chips used in construction projects from the
- * Elektor Electronics magazine (http://elektor-electronics.co.uk)
- */
-#define ELEKTOR_VID            0x0C7D
-#define ELEKTOR_FT323R_PID     0x0005  /* RFID-Reader, issue 09-2006 */
-
-/*
- * Posiflex inc retail equipment (http://www.posiflex.com.tw)
- */
-#define POSIFLEX_VID           0x0d3a  /* Vendor ID */
-#define POSIFLEX_PP7000_PID    0x0300  /* PP-7000II thermal printer */
-
-/*
- * The following are the values for two KOBIL chipcard terminals.
- */
-#define KOBIL_VID              0x0d46  /* KOBIL Vendor ID */
-#define KOBIL_CONV_B1_PID      0x2020  /* KOBIL Konverter for B1 */
-#define KOBIL_CONV_KAAN_PID    0x2021  /* KOBIL_Konverter for KAAN */
-
-#define FTDI_NF_RIC_VID        0x0DCD  /* Vendor Id */
-#define FTDI_NF_RIC_PID        0x0001  /* Product Id */
-
-/*
- * Falcom Wireless Communications GmbH
- */
-#define FALCOM_VID             0x0F94  /* Vendor Id */
-#define FALCOM_TWIST_PID       0x0001  /* Falcom Twist USB GPRS modem */
-#define FALCOM_SAMBA_PID       0x0005  /* Falcom Samba USB GPRS modem */
-
-/* Larsen and Brusgaard AltiTrack/USBtrack  */
-#define LARSENBRUSGAARD_VID            0x0FD8
-#define LB_ALTITRACK_PID               0x0001
-
-/*
- * TTi (Thurlby Thandar Instruments)
- */
-#define TTI_VID                        0x103E  /* Vendor Id */
-#define TTI_QL355P_PID         0x03E8  /* TTi QL355P power supply */
-
-/* Interbiometrics USB I/O Board */
-/* Developed for Interbiometrics by Rudolf Gugler */
-#define INTERBIOMETRICS_VID              0x1209
-#define INTERBIOMETRICS_IOBOARD_PID      0x1002
-#define INTERBIOMETRICS_MINI_IOBOARD_PID 0x1006
-
-/*
- * Testo products (http://www.testo.com/)
- * Submitted by Colin Leroy
- */
-#define TESTO_VID                      0x128D
-#define TESTO_USB_INTERFACE_PID                0x0001
-
-/*
- * Mobility Electronics products.
- */
-#define MOBILITY_VID                   0x1342
-#define MOBILITY_USB_SERIAL_PID                0x0202  /* EasiDock USB 200 serial */
-
-/*
- * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
- * Submitted by Harald Welte <laforge@openmoko.org>
- */
-#define        FIC_VID                 0x1457
-#define        FIC_NEO1973_DEBUG_PID   0x5118
-
-/* Olimex */
-#define OLIMEX_VID                     0x15BA
-#define OLIMEX_ARM_USB_OCD_PID         0x0003
-
-/*
- * Telldus Technologies
- */
-#define TELLDUS_VID                    0x1781  /* Vendor ID */
-#define TELLDUS_TELLSTICK_PID          0x0C30  /* RF control dongle 433 MHz using FT232RL */
-
-/*
- * RT Systems programming cables for various ham radios
- */
-#define RTSYSTEMS_VID                  0x2100  /* Vendor ID */
-#define RTSYSTEMS_SERIAL_VX7_PID       0x9e52  /* Serial converter for VX-7 Radios using FT232RL */
-#define RTSYSTEMS_CT29B_PID            0x9e54  /* CT29B Radio Cable */
-
-/*
- * Bayer Ascensia Contour blood glucose meter USB-converter cable.
- * http://winglucofacts.com/cables/
- */
-#define BAYER_VID                      0x1A79
-#define BAYER_CONTOUR_CABLE_PID        0x6001
-
-/*
- * The following are the values for the Matrix Orbital FTDI Range
- * Anything in this range will use an FT232RL.
- */
-#define MTXORB_VID                     0x1B3D
-#define MTXORB_FTDI_RANGE_0100_PID     0x0100
-#define MTXORB_FTDI_RANGE_0101_PID     0x0101
-#define MTXORB_FTDI_RANGE_0102_PID     0x0102
-#define MTXORB_FTDI_RANGE_0103_PID     0x0103
-#define MTXORB_FTDI_RANGE_0104_PID     0x0104
-#define MTXORB_FTDI_RANGE_0105_PID     0x0105
-#define MTXORB_FTDI_RANGE_0106_PID     0x0106
-#define MTXORB_FTDI_RANGE_0107_PID     0x0107
-#define MTXORB_FTDI_RANGE_0108_PID     0x0108
-#define MTXORB_FTDI_RANGE_0109_PID     0x0109
-#define MTXORB_FTDI_RANGE_010A_PID     0x010A
-#define MTXORB_FTDI_RANGE_010B_PID     0x010B
-#define MTXORB_FTDI_RANGE_010C_PID     0x010C
-#define MTXORB_FTDI_RANGE_010D_PID     0x010D
-#define MTXORB_FTDI_RANGE_010E_PID     0x010E
-#define MTXORB_FTDI_RANGE_010F_PID     0x010F
-#define MTXORB_FTDI_RANGE_0110_PID     0x0110
-#define MTXORB_FTDI_RANGE_0111_PID     0x0111
-#define MTXORB_FTDI_RANGE_0112_PID     0x0112
-#define MTXORB_FTDI_RANGE_0113_PID     0x0113
-#define MTXORB_FTDI_RANGE_0114_PID     0x0114
-#define MTXORB_FTDI_RANGE_0115_PID     0x0115
-#define MTXORB_FTDI_RANGE_0116_PID     0x0116
-#define MTXORB_FTDI_RANGE_0117_PID     0x0117
-#define MTXORB_FTDI_RANGE_0118_PID     0x0118
-#define MTXORB_FTDI_RANGE_0119_PID     0x0119
-#define MTXORB_FTDI_RANGE_011A_PID     0x011A
-#define MTXORB_FTDI_RANGE_011B_PID     0x011B
-#define MTXORB_FTDI_RANGE_011C_PID     0x011C
-#define MTXORB_FTDI_RANGE_011D_PID     0x011D
-#define MTXORB_FTDI_RANGE_011E_PID     0x011E
-#define MTXORB_FTDI_RANGE_011F_PID     0x011F
-#define MTXORB_FTDI_RANGE_0120_PID     0x0120
-#define MTXORB_FTDI_RANGE_0121_PID     0x0121
-#define MTXORB_FTDI_RANGE_0122_PID     0x0122
-#define MTXORB_FTDI_RANGE_0123_PID     0x0123
-#define MTXORB_FTDI_RANGE_0124_PID     0x0124
-#define MTXORB_FTDI_RANGE_0125_PID     0x0125
-#define MTXORB_FTDI_RANGE_0126_PID     0x0126
-#define MTXORB_FTDI_RANGE_0127_PID     0x0127
-#define MTXORB_FTDI_RANGE_0128_PID     0x0128
-#define MTXORB_FTDI_RANGE_0129_PID     0x0129
-#define MTXORB_FTDI_RANGE_012A_PID     0x012A
-#define MTXORB_FTDI_RANGE_012B_PID     0x012B
-#define MTXORB_FTDI_RANGE_012C_PID     0x012C
-#define MTXORB_FTDI_RANGE_012D_PID     0x012D
-#define MTXORB_FTDI_RANGE_012E_PID     0x012E
-#define MTXORB_FTDI_RANGE_012F_PID     0x012F
-#define MTXORB_FTDI_RANGE_0130_PID     0x0130
-#define MTXORB_FTDI_RANGE_0131_PID     0x0131
-#define MTXORB_FTDI_RANGE_0132_PID     0x0132
-#define MTXORB_FTDI_RANGE_0133_PID     0x0133
-#define MTXORB_FTDI_RANGE_0134_PID     0x0134
-#define MTXORB_FTDI_RANGE_0135_PID     0x0135
-#define MTXORB_FTDI_RANGE_0136_PID     0x0136
-#define MTXORB_FTDI_RANGE_0137_PID     0x0137
-#define MTXORB_FTDI_RANGE_0138_PID     0x0138
-#define MTXORB_FTDI_RANGE_0139_PID     0x0139
-#define MTXORB_FTDI_RANGE_013A_PID     0x013A
-#define MTXORB_FTDI_RANGE_013B_PID     0x013B
-#define MTXORB_FTDI_RANGE_013C_PID     0x013C
-#define MTXORB_FTDI_RANGE_013D_PID     0x013D
-#define MTXORB_FTDI_RANGE_013E_PID     0x013E
-#define MTXORB_FTDI_RANGE_013F_PID     0x013F
-#define MTXORB_FTDI_RANGE_0140_PID     0x0140
-#define MTXORB_FTDI_RANGE_0141_PID     0x0141
-#define MTXORB_FTDI_RANGE_0142_PID     0x0142
-#define MTXORB_FTDI_RANGE_0143_PID     0x0143
-#define MTXORB_FTDI_RANGE_0144_PID     0x0144
-#define MTXORB_FTDI_RANGE_0145_PID     0x0145
-#define MTXORB_FTDI_RANGE_0146_PID     0x0146
-#define MTXORB_FTDI_RANGE_0147_PID     0x0147
-#define MTXORB_FTDI_RANGE_0148_PID     0x0148
-#define MTXORB_FTDI_RANGE_0149_PID     0x0149
-#define MTXORB_FTDI_RANGE_014A_PID     0x014A
-#define MTXORB_FTDI_RANGE_014B_PID     0x014B
-#define MTXORB_FTDI_RANGE_014C_PID     0x014C
-#define MTXORB_FTDI_RANGE_014D_PID     0x014D
-#define MTXORB_FTDI_RANGE_014E_PID     0x014E
-#define MTXORB_FTDI_RANGE_014F_PID     0x014F
-#define MTXORB_FTDI_RANGE_0150_PID     0x0150
-#define MTXORB_FTDI_RANGE_0151_PID     0x0151
-#define MTXORB_FTDI_RANGE_0152_PID     0x0152
-#define MTXORB_FTDI_RANGE_0153_PID     0x0153
-#define MTXORB_FTDI_RANGE_0154_PID     0x0154
-#define MTXORB_FTDI_RANGE_0155_PID     0x0155
-#define MTXORB_FTDI_RANGE_0156_PID     0x0156
-#define MTXORB_FTDI_RANGE_0157_PID     0x0157
-#define MTXORB_FTDI_RANGE_0158_PID     0x0158
-#define MTXORB_FTDI_RANGE_0159_PID     0x0159
-#define MTXORB_FTDI_RANGE_015A_PID     0x015A
-#define MTXORB_FTDI_RANGE_015B_PID     0x015B
-#define MTXORB_FTDI_RANGE_015C_PID     0x015C
-#define MTXORB_FTDI_RANGE_015D_PID     0x015D
-#define MTXORB_FTDI_RANGE_015E_PID     0x015E
-#define MTXORB_FTDI_RANGE_015F_PID     0x015F
-#define MTXORB_FTDI_RANGE_0160_PID     0x0160
-#define MTXORB_FTDI_RANGE_0161_PID     0x0161
-#define MTXORB_FTDI_RANGE_0162_PID     0x0162
-#define MTXORB_FTDI_RANGE_0163_PID     0x0163
-#define MTXORB_FTDI_RANGE_0164_PID     0x0164
-#define MTXORB_FTDI_RANGE_0165_PID     0x0165
-#define MTXORB_FTDI_RANGE_0166_PID     0x0166
-#define MTXORB_FTDI_RANGE_0167_PID     0x0167
-#define MTXORB_FTDI_RANGE_0168_PID     0x0168
-#define MTXORB_FTDI_RANGE_0169_PID     0x0169
-#define MTXORB_FTDI_RANGE_016A_PID     0x016A
-#define MTXORB_FTDI_RANGE_016B_PID     0x016B
-#define MTXORB_FTDI_RANGE_016C_PID     0x016C
-#define MTXORB_FTDI_RANGE_016D_PID     0x016D
-#define MTXORB_FTDI_RANGE_016E_PID     0x016E
-#define MTXORB_FTDI_RANGE_016F_PID     0x016F
-#define MTXORB_FTDI_RANGE_0170_PID     0x0170
-#define MTXORB_FTDI_RANGE_0171_PID     0x0171
-#define MTXORB_FTDI_RANGE_0172_PID     0x0172
-#define MTXORB_FTDI_RANGE_0173_PID     0x0173
-#define MTXORB_FTDI_RANGE_0174_PID     0x0174
-#define MTXORB_FTDI_RANGE_0175_PID     0x0175
-#define MTXORB_FTDI_RANGE_0176_PID     0x0176
-#define MTXORB_FTDI_RANGE_0177_PID     0x0177
-#define MTXORB_FTDI_RANGE_0178_PID     0x0178
-#define MTXORB_FTDI_RANGE_0179_PID     0x0179
-#define MTXORB_FTDI_RANGE_017A_PID     0x017A
-#define MTXORB_FTDI_RANGE_017B_PID     0x017B
-#define MTXORB_FTDI_RANGE_017C_PID     0x017C
-#define MTXORB_FTDI_RANGE_017D_PID     0x017D
-#define MTXORB_FTDI_RANGE_017E_PID     0x017E
-#define MTXORB_FTDI_RANGE_017F_PID     0x017F
-#define MTXORB_FTDI_RANGE_0180_PID     0x0180
-#define MTXORB_FTDI_RANGE_0181_PID     0x0181
-#define MTXORB_FTDI_RANGE_0182_PID     0x0182
-#define MTXORB_FTDI_RANGE_0183_PID     0x0183
-#define MTXORB_FTDI_RANGE_0184_PID     0x0184
-#define MTXORB_FTDI_RANGE_0185_PID     0x0185
-#define MTXORB_FTDI_RANGE_0186_PID     0x0186
-#define MTXORB_FTDI_RANGE_0187_PID     0x0187
-#define MTXORB_FTDI_RANGE_0188_PID     0x0188
-#define MTXORB_FTDI_RANGE_0189_PID     0x0189
-#define MTXORB_FTDI_RANGE_018A_PID     0x018A
-#define MTXORB_FTDI_RANGE_018B_PID     0x018B
-#define MTXORB_FTDI_RANGE_018C_PID     0x018C
-#define MTXORB_FTDI_RANGE_018D_PID     0x018D
-#define MTXORB_FTDI_RANGE_018E_PID     0x018E
-#define MTXORB_FTDI_RANGE_018F_PID     0x018F
-#define MTXORB_FTDI_RANGE_0190_PID     0x0190
-#define MTXORB_FTDI_RANGE_0191_PID     0x0191
-#define MTXORB_FTDI_RANGE_0192_PID     0x0192
-#define MTXORB_FTDI_RANGE_0193_PID     0x0193
-#define MTXORB_FTDI_RANGE_0194_PID     0x0194
-#define MTXORB_FTDI_RANGE_0195_PID     0x0195
-#define MTXORB_FTDI_RANGE_0196_PID     0x0196
-#define MTXORB_FTDI_RANGE_0197_PID     0x0197
-#define MTXORB_FTDI_RANGE_0198_PID     0x0198
-#define MTXORB_FTDI_RANGE_0199_PID     0x0199
-#define MTXORB_FTDI_RANGE_019A_PID     0x019A
-#define MTXORB_FTDI_RANGE_019B_PID     0x019B
-#define MTXORB_FTDI_RANGE_019C_PID     0x019C
-#define MTXORB_FTDI_RANGE_019D_PID     0x019D
-#define MTXORB_FTDI_RANGE_019E_PID     0x019E
-#define MTXORB_FTDI_RANGE_019F_PID     0x019F
-#define MTXORB_FTDI_RANGE_01A0_PID     0x01A0
-#define MTXORB_FTDI_RANGE_01A1_PID     0x01A1
-#define MTXORB_FTDI_RANGE_01A2_PID     0x01A2
-#define MTXORB_FTDI_RANGE_01A3_PID     0x01A3
-#define MTXORB_FTDI_RANGE_01A4_PID     0x01A4
-#define MTXORB_FTDI_RANGE_01A5_PID     0x01A5
-#define MTXORB_FTDI_RANGE_01A6_PID     0x01A6
-#define MTXORB_FTDI_RANGE_01A7_PID     0x01A7
-#define MTXORB_FTDI_RANGE_01A8_PID     0x01A8
-#define MTXORB_FTDI_RANGE_01A9_PID     0x01A9
-#define MTXORB_FTDI_RANGE_01AA_PID     0x01AA
-#define MTXORB_FTDI_RANGE_01AB_PID     0x01AB
-#define MTXORB_FTDI_RANGE_01AC_PID     0x01AC
-#define MTXORB_FTDI_RANGE_01AD_PID     0x01AD
-#define MTXORB_FTDI_RANGE_01AE_PID     0x01AE
-#define MTXORB_FTDI_RANGE_01AF_PID     0x01AF
-#define MTXORB_FTDI_RANGE_01B0_PID     0x01B0
-#define MTXORB_FTDI_RANGE_01B1_PID     0x01B1
-#define MTXORB_FTDI_RANGE_01B2_PID     0x01B2
-#define MTXORB_FTDI_RANGE_01B3_PID     0x01B3
-#define MTXORB_FTDI_RANGE_01B4_PID     0x01B4
-#define MTXORB_FTDI_RANGE_01B5_PID     0x01B5
-#define MTXORB_FTDI_RANGE_01B6_PID     0x01B6
-#define MTXORB_FTDI_RANGE_01B7_PID     0x01B7
-#define MTXORB_FTDI_RANGE_01B8_PID     0x01B8
-#define MTXORB_FTDI_RANGE_01B9_PID     0x01B9
-#define MTXORB_FTDI_RANGE_01BA_PID     0x01BA
-#define MTXORB_FTDI_RANGE_01BB_PID     0x01BB
-#define MTXORB_FTDI_RANGE_01BC_PID     0x01BC
-#define MTXORB_FTDI_RANGE_01BD_PID     0x01BD
-#define MTXORB_FTDI_RANGE_01BE_PID     0x01BE
-#define MTXORB_FTDI_RANGE_01BF_PID     0x01BF
-#define MTXORB_FTDI_RANGE_01C0_PID     0x01C0
-#define MTXORB_FTDI_RANGE_01C1_PID     0x01C1
-#define MTXORB_FTDI_RANGE_01C2_PID     0x01C2
-#define MTXORB_FTDI_RANGE_01C3_PID     0x01C3
-#define MTXORB_FTDI_RANGE_01C4_PID     0x01C4
-#define MTXORB_FTDI_RANGE_01C5_PID     0x01C5
-#define MTXORB_FTDI_RANGE_01C6_PID     0x01C6
-#define MTXORB_FTDI_RANGE_01C7_PID     0x01C7
-#define MTXORB_FTDI_RANGE_01C8_PID     0x01C8
-#define MTXORB_FTDI_RANGE_01C9_PID     0x01C9
-#define MTXORB_FTDI_RANGE_01CA_PID     0x01CA
-#define MTXORB_FTDI_RANGE_01CB_PID     0x01CB
-#define MTXORB_FTDI_RANGE_01CC_PID     0x01CC
-#define MTXORB_FTDI_RANGE_01CD_PID     0x01CD
-#define MTXORB_FTDI_RANGE_01CE_PID     0x01CE
-#define MTXORB_FTDI_RANGE_01CF_PID     0x01CF
-#define MTXORB_FTDI_RANGE_01D0_PID     0x01D0
-#define MTXORB_FTDI_RANGE_01D1_PID     0x01D1
-#define MTXORB_FTDI_RANGE_01D2_PID     0x01D2
-#define MTXORB_FTDI_RANGE_01D3_PID     0x01D3
-#define MTXORB_FTDI_RANGE_01D4_PID     0x01D4
-#define MTXORB_FTDI_RANGE_01D5_PID     0x01D5
-#define MTXORB_FTDI_RANGE_01D6_PID     0x01D6
-#define MTXORB_FTDI_RANGE_01D7_PID     0x01D7
-#define MTXORB_FTDI_RANGE_01D8_PID     0x01D8
-#define MTXORB_FTDI_RANGE_01D9_PID     0x01D9
-#define MTXORB_FTDI_RANGE_01DA_PID     0x01DA
-#define MTXORB_FTDI_RANGE_01DB_PID     0x01DB
-#define MTXORB_FTDI_RANGE_01DC_PID     0x01DC
-#define MTXORB_FTDI_RANGE_01DD_PID     0x01DD
-#define MTXORB_FTDI_RANGE_01DE_PID     0x01DE
-#define MTXORB_FTDI_RANGE_01DF_PID     0x01DF
-#define MTXORB_FTDI_RANGE_01E0_PID     0x01E0
-#define MTXORB_FTDI_RANGE_01E1_PID     0x01E1
-#define MTXORB_FTDI_RANGE_01E2_PID     0x01E2
-#define MTXORB_FTDI_RANGE_01E3_PID     0x01E3
-#define MTXORB_FTDI_RANGE_01E4_PID     0x01E4
-#define MTXORB_FTDI_RANGE_01E5_PID     0x01E5
-#define MTXORB_FTDI_RANGE_01E6_PID     0x01E6
-#define MTXORB_FTDI_RANGE_01E7_PID     0x01E7
-#define MTXORB_FTDI_RANGE_01E8_PID     0x01E8
-#define MTXORB_FTDI_RANGE_01E9_PID     0x01E9
-#define MTXORB_FTDI_RANGE_01EA_PID     0x01EA
-#define MTXORB_FTDI_RANGE_01EB_PID     0x01EB
-#define MTXORB_FTDI_RANGE_01EC_PID     0x01EC
-#define MTXORB_FTDI_RANGE_01ED_PID     0x01ED
-#define MTXORB_FTDI_RANGE_01EE_PID     0x01EE
-#define MTXORB_FTDI_RANGE_01EF_PID     0x01EF
-#define MTXORB_FTDI_RANGE_01F0_PID     0x01F0
-#define MTXORB_FTDI_RANGE_01F1_PID     0x01F1
-#define MTXORB_FTDI_RANGE_01F2_PID     0x01F2
-#define MTXORB_FTDI_RANGE_01F3_PID     0x01F3
-#define MTXORB_FTDI_RANGE_01F4_PID     0x01F4
-#define MTXORB_FTDI_RANGE_01F5_PID     0x01F5
-#define MTXORB_FTDI_RANGE_01F6_PID     0x01F6
-#define MTXORB_FTDI_RANGE_01F7_PID     0x01F7
-#define MTXORB_FTDI_RANGE_01F8_PID     0x01F8
-#define MTXORB_FTDI_RANGE_01F9_PID     0x01F9
-#define MTXORB_FTDI_RANGE_01FA_PID     0x01FA
-#define MTXORB_FTDI_RANGE_01FB_PID     0x01FB
-#define MTXORB_FTDI_RANGE_01FC_PID     0x01FC
-#define MTXORB_FTDI_RANGE_01FD_PID     0x01FD
-#define MTXORB_FTDI_RANGE_01FE_PID     0x01FE
-#define MTXORB_FTDI_RANGE_01FF_PID     0x01FF
-
-
-
-/*
- * The Mobility Lab (TML)
- * Submitted by Pierre Castella
- */
-#define TML_VID                        0x1B91  /* Vendor ID */
-#define TML_USB_SERIAL_PID     0x0064  /* USB - Serial Converter */
-
-/* Alti-2 products  http://www.alti-2.com */
-#define ALTI2_VID      0x1BC9
-#define ALTI2_N3_PID   0x6001  /* Neptune 3 */
-
-/*
- * Ionics PlugComputer
- */
-#define IONICS_VID                     0x1c0c
-#define IONICS_PLUGCOMPUTER_PID                0x0102
-
-/*
- * Dresden Elektronic Sensor Terminal Board
- */
-#define DE_VID                 0x1cf1 /* Vendor ID */
-#define STB_PID                        0x0001 /* Sensor Terminal Board */
-#define WHT_PID                        0x0004 /* Wireless Handheld Terminal */
-
-/*
- * Papouch products (http://www.papouch.com/)
- * Submitted by Folkert van Heusden
- */
-
-#define PAPOUCH_VID                    0x5050  /* Vendor ID */
-#define PAPOUCH_SB485_PID              0x0100  /* Papouch SB485 USB-485/422 Converter */
-#define PAPOUCH_AP485_PID              0x0101  /* AP485 USB-RS485 Converter */
-#define PAPOUCH_SB422_PID              0x0102  /* Papouch SB422 USB-RS422 Converter  */
-#define PAPOUCH_SB485_2_PID            0x0103  /* Papouch SB485 USB-485/422 Converter */
-#define PAPOUCH_AP485_2_PID            0x0104  /* AP485 USB-RS485 Converter */
-#define PAPOUCH_SB422_2_PID            0x0105  /* Papouch SB422 USB-RS422 Converter  */
-#define PAPOUCH_SB485S_PID             0x0106  /* Papouch SB485S USB-485/422 Converter */
-#define PAPOUCH_SB485C_PID             0x0107  /* Papouch SB485C USB-485/422 Converter */
-#define PAPOUCH_LEC_PID                        0x0300  /* LEC USB Converter */
-#define PAPOUCH_SB232_PID              0x0301  /* Papouch SB232 USB-RS232 Converter */
-#define PAPOUCH_TMU_PID                        0x0400  /* TMU USB Thermometer */
-#define PAPOUCH_IRAMP_PID              0x0500  /* Papouch IRAmp Duplex */
-#define PAPOUCH_DRAK5_PID              0x0700  /* Papouch DRAK5 */
-#define PAPOUCH_QUIDO8x8_PID           0x0800  /* Papouch Quido 8/8 Module */
-#define PAPOUCH_QUIDO4x4_PID           0x0900  /* Papouch Quido 4/4 Module */
-#define PAPOUCH_QUIDO2x2_PID           0x0a00  /* Papouch Quido 2/2 Module */
-#define PAPOUCH_QUIDO10x1_PID          0x0b00  /* Papouch Quido 10/1 Module */
-#define PAPOUCH_QUIDO30x3_PID          0x0c00  /* Papouch Quido 30/3 Module */
-#define PAPOUCH_QUIDO60x3_PID          0x0d00  /* Papouch Quido 60(100)/3 Module */
-#define PAPOUCH_QUIDO2x16_PID          0x0e00  /* Papouch Quido 2/16 Module */
-#define PAPOUCH_QUIDO3x32_PID          0x0f00  /* Papouch Quido 3/32 Module */
-#define PAPOUCH_DRAK6_PID              0x1000  /* Papouch DRAK6 */
-#define PAPOUCH_UPSUSB_PID             0x8000  /* Papouch UPS-USB adapter */
-#define PAPOUCH_MU_PID                 0x8001  /* MU controller */
-#define PAPOUCH_SIMUKEY_PID            0x8002  /* Papouch SimuKey */
-#define PAPOUCH_AD4USB_PID             0x8003  /* AD4USB Measurement Module */
-#define PAPOUCH_GMUX_PID               0x8004  /* Papouch GOLIATH MUX */
-#define PAPOUCH_GMSR_PID               0x8005  /* Papouch GOLIATH MSR */
-
-/*
- * Marvell SheevaPlug
- */
-#define MARVELL_VID            0x9e88
-#define MARVELL_SHEEVAPLUG_PID 0x9e8f
-
-/*
- * Evolution Robotics products (http://www.evolution.com/).
- * Submitted by Shawn M. Lavelle.
- */
-#define EVOLUTION_VID          0xDEEE  /* Vendor ID */
-#define EVOLUTION_ER1_PID      0x0300  /* ER1 Control Module */
-#define EVO_8U232AM_PID        0x02FF  /* Evolution robotics RCM2 (FT232AM)*/
-#define EVO_HYBRID_PID         0x0302  /* Evolution robotics RCM4 PID (FT232BM)*/
-#define EVO_RCM4_PID           0x0303  /* Evolution robotics RCM4 PID */
-
-/*
- * MJS Gadgets HD Radio / XM Radio / Sirius Radio interfaces (using VID 0x0403)
- */
-#define MJSG_GENERIC_PID       0x9378
-#define MJSG_SR_RADIO_PID      0x9379
-#define MJSG_XM_RADIO_PID      0x937A
-#define MJSG_HD_RADIO_PID      0x937C
-
-/*
- * Xverve Signalyzer tools (http://www.signalyzer.com/)
- */
-#define XVERVE_SIGNALYZER_ST_PID       0xBCA0
-#define XVERVE_SIGNALYZER_SLITE_PID    0xBCA1
-#define XVERVE_SIGNALYZER_SH2_PID      0xBCA2
-#define XVERVE_SIGNALYZER_SH4_PID      0xBCA4
-
-/*
- * Segway Robotic Mobility Platform USB interface (using VID 0x0403)
- * Submitted by John G. Rogers
- */
-#define SEGWAY_RMP200_PID      0xe729
-
-
-/*
- * Accesio USB Data Acquisition products (http://www.accesio.com/)
- */
-#define ACCESIO_COM4SM_PID     0xD578
-
-/* www.sciencescope.co.uk educational dataloggers */
-#define FTDI_SCIENCESCOPE_LOGBOOKML_PID                0xFF18
-#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID       0xFF1C
-#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID       0xFF1D
-
-/*
- * Milkymist One JTAG/Serial
- */
-#define QIHARDWARE_VID                 0x20B7
-#define MILKYMISTONE_JTAGSERIAL_PID    0x0713
-
index fbdbac5ae410a5ebade345e42d7cd025bf54b4e8..d4cc0f7af400d0133653e4e0515b25a131e2a37f 100644 (file)
@@ -1157,7 +1157,7 @@ static int download_fw(struct edgeport_serial *serial)
 
                        /* Check if we have an old version in the I2C and
                           update if necessary */
-                       if (download_cur_ver < download_new_ver) {
+                       if (download_cur_ver != download_new_ver) {
                                dbg("%s - Update I2C dld from %d.%d to %d.%d",
                                    __func__,
                                    firmware_version->Ver_Major,
index 2e0497b02260c3123ba130bd0fb37643c8b88f8d..95d8d26b9a44de53501d02ddd02f4d7e8c07fa1d 100644 (file)
@@ -312,7 +312,6 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
                kfree(port->read_urb->transfer_buffer);
                port->read_urb->transfer_buffer = buffer;
                port->read_urb->transfer_buffer_length = buffer_size;
-               port->bulk_in_buffer = buffer;
 
                buffer = kmalloc(buffer_size, GFP_KERNEL);
                if (!buffer) {
@@ -322,7 +321,6 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
                kfree(port->write_urb->transfer_buffer);
                port->write_urb->transfer_buffer = buffer;
                port->write_urb->transfer_buffer_length = buffer_size;
-               port->bulk_out_buffer = buffer;
                port->bulk_out_size = buffer_size;
        }
 
index 682508289b44d3a0017f24e40df88adec331c7b9..3a7873806f465aaf02a41213df7c2d41e5f866f1 100644 (file)
@@ -310,7 +310,6 @@ err_cleanup:
                                usb_free_urb(priv->write_urb_pool[j]);
                        }
                }
-               kfree(priv);
                usb_set_serial_port_data(serial->port[i], NULL);
        }
        return -ENOMEM;
index 9d99e682f96d0794ed9fd7600d1d1f6e1a60a144..45ea694b3ae6a4900d0a04810ef9a8b149ad7c32 100644 (file)
@@ -345,8 +345,7 @@ static void kobil_close(struct usb_serial_port *port)
 
        /* FIXME: Add rts/dtr methods */
        if (port->write_urb) {
-               usb_poison_urb(port->write_urb);
-               kfree(port->write_urb->transfer_buffer);
+               usb_kill_urb(port->write_urb);
                usb_free_urb(port->write_urb);
                port->write_urb = NULL;
        }
index f3a73e7b948ce4c1809993a1fa32233b12c1228d..763e32a44be02eb790c0c7f66ed25e0bae947f19 100644 (file)
@@ -1466,9 +1466,6 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
 
        case TIOCGICOUNT:
                cnow = mos7720_port->icount;
-
-               memset(&icount, 0, sizeof(struct serial_icounter_struct));
-
                icount.cts = cnow.cts;
                icount.dsr = cnow.dsr;
                icount.rng = cnow.rng;
index 9fdcee2eca99c3941f2bc92f83c7045d35e2a92c..485fa9c5b10766698caabf03c580f92715c86bba 100644 (file)
  * by making a change here, in moschip_port_id_table, and in
  * moschip_id_table_combined
  */
-#define USB_VENDOR_ID_BANDB              0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2        0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_2P       0xBC00
-#define BANDB_DEVICE_ID_USO9ML2_4        0xAC24
-#define BANDB_DEVICE_ID_USO9ML2_4P       0xBC01
-#define BANDB_DEVICE_ID_US9ML2_2         0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4         0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2         0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4         0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2        0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_2P       0xBC02
-#define BANDB_DEVICE_ID_USOPTL4_4        0xAC44
-#define BANDB_DEVICE_ID_USOPTL4_4P       0xBC03
-#define BANDB_DEVICE_ID_USOPTL2_4        0xAC24
+#define USB_VENDOR_ID_BANDB             0x0856
+#define BANDB_DEVICE_ID_USO9ML2_2      0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_4      0xAC24
+#define BANDB_DEVICE_ID_US9ML2_2       0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4       0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2       0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4       0xAC32
+#define BANDB_DEVICE_ID_USOPTL4_2       0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_4       0xAC44
 
 /* This driver also supports
  * ATEN UC2324 device using Moschip MCS7840
@@ -189,18 +184,13 @@ static struct usb_device_id moschip_port_id_table[] = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
        {}                      /* terminating entry */
@@ -210,18 +200,13 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
-       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
        {}                      /* terminating entry */
@@ -295,19 +280,12 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
 {
        struct usb_device *dev = port->serial->dev;
        int ret = 0;
-       u8 *buf;
-
-       buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
 
        ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
-                             MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
+                             MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
                              MOS_WDR_TIMEOUT);
-       *val = buf[0];
        dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
-
-       kfree(buf);
+       *val = (*val) & 0x00ff;
        return ret;
 }
 
@@ -360,11 +338,6 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
        struct usb_device *dev = port->serial->dev;
        int ret = 0;
        __u16 Wval;
-       u8 *buf;
-
-       buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
 
        /* dbg("application number is %4x",
            (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
@@ -388,11 +361,9 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
                }
        }
        ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
-                             MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
+                             MCS_RD_RTYPE, Wval, reg, val, VENDOR_READ_LENGTH,
                              MOS_WDR_TIMEOUT);
-       *val = buf[0];
-
-       kfree(buf);
+       *val = (*val) & 0x00ff;
        return ret;
 }
 
@@ -743,6 +714,7 @@ static void mos7840_bulk_in_callback(struct urb *urb)
        mos7840_port = urb->context;
        if (!mos7840_port) {
                dbg("%s", "NULL mos7840_port pointer");
+               mos7840_port->read_urb_busy = false;
                return;
        }
 
@@ -2287,9 +2259,6 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
        case TIOCGICOUNT:
                cnow = mos7840_port->icount;
                smp_rmb();
-
-               memset(&icount, 0, sizeof(struct serial_icounter_struct));
-
                icount.cts = cnow.cts;
                icount.dsr = cnow.dsr;
                icount.rng = cnow.rng;
index 061a083856738d711bf94902bc30c85e01e30bb8..5ceaa4c6be0901de3cbe2305ae3775904fec701c 100644 (file)
@@ -24,7 +24,6 @@ static int debug;
 
 static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
-       { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index db7cf086604623c8d5b8b9aafea21d63e7e04170..80f59b6350cbdcee5a9864ef51990c51e2309ad4 100644 (file)
@@ -99,8 +99,8 @@ static void opticon_bulk_callback(struct urb *urb)
                                available_room = tty_buffer_request_room(tty,
                                                                data_length);
                                if (available_room) {
-                                       tty_insert_flip_string(tty, data + 2,
-                                                              data_length);
+                                       tty_insert_flip_string(tty, data,
+                                                              available_room);
                                        tty_flip_buffer_push(tty);
                                }
                                tty_kref_put(tty);
@@ -134,7 +134,7 @@ exit:
                                                  priv->bulk_address),
                                  priv->bulk_in_buffer, priv->buffer_size,
                                  opticon_bulk_callback, priv);
-               result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
+               result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
                if (result)
                        dev_err(&port->dev,
                            "%s - failed resubmitting read urb, error %d\n",
index 9f189d24b1eef0f1898901d90ab6c3b2c5c71e52..4ee33b2f07f22020a797488cb5879eaecd4d174e 100644 (file)
@@ -165,10 +165,7 @@ static int  option_resume(struct usb_serial *serial);
 #define HUAWEI_PRODUCT_E143D                   0x143D
 #define HUAWEI_PRODUCT_E143E                   0x143E
 #define HUAWEI_PRODUCT_E143F                   0x143F
-#define HUAWEI_PRODUCT_K4505                   0x1464
-#define HUAWEI_PRODUCT_K3765                   0x1465
 #define HUAWEI_PRODUCT_E14AC                   0x14AC
-#define HUAWEI_PRODUCT_ETS1220                 0x1803
 
 #define QUANTA_VENDOR_ID                       0x0408
 #define QUANTA_PRODUCT_Q101                    0xEA02
@@ -229,7 +226,6 @@ static int  option_resume(struct usb_serial *serial);
 #define AMOI_PRODUCT_H01                       0x0800
 #define AMOI_PRODUCT_H01A                      0x7002
 #define AMOI_PRODUCT_H02                       0x0802
-#define AMOI_PRODUCT_SKYPEPHONE_S2             0x0407
 
 #define DELL_VENDOR_ID                         0x413C
 
@@ -292,9 +288,7 @@ static int  option_resume(struct usb_serial *serial);
 
 #define QUALCOMM_VENDOR_ID                     0x05C6
 
-#define CMOTECH_VENDOR_ID                      0x16d8
-#define CMOTECH_PRODUCT_6008                   0x6008
-#define CMOTECH_PRODUCT_6280                   0x6280
+#define MAXON_VENDOR_ID                                0x16d8
 
 #define TELIT_VENDOR_ID                                0x1bc7
 #define TELIT_PRODUCT_UC864E                   0x1003
@@ -320,7 +314,6 @@ static int  option_resume(struct usb_serial *serial);
 #define QISDA_PRODUCT_H21_4512                 0x4512
 #define QISDA_PRODUCT_H21_4523                 0x4523
 #define QISDA_PRODUCT_H20_4515                 0x4515
-#define QISDA_PRODUCT_H20_4518                 0x4518
 #define QISDA_PRODUCT_H20_4519                 0x4519
 
 /* TLAYTECH PRODUCTS */
@@ -339,24 +332,6 @@ static int  option_resume(struct usb_serial *serial);
 #define ALCATEL_VENDOR_ID                      0x1bbb
 #define ALCATEL_PRODUCT_X060S                  0x0000
 
-#define PIRELLI_VENDOR_ID                      0x1266
-#define PIRELLI_PRODUCT_C100_1                 0x1002
-#define PIRELLI_PRODUCT_C100_2                 0x1003
-#define PIRELLI_PRODUCT_1004                   0x1004
-#define PIRELLI_PRODUCT_1005                   0x1005
-#define PIRELLI_PRODUCT_1006                   0x1006
-#define PIRELLI_PRODUCT_1007                   0x1007
-#define PIRELLI_PRODUCT_1008                   0x1008
-#define PIRELLI_PRODUCT_1009                   0x1009
-#define PIRELLI_PRODUCT_100A                   0x100a
-#define PIRELLI_PRODUCT_100B                   0x100b
-#define PIRELLI_PRODUCT_100C                   0x100c
-#define PIRELLI_PRODUCT_100D                   0x100d
-#define PIRELLI_PRODUCT_100E                   0x100e
-#define PIRELLI_PRODUCT_100F                   0x100f
-#define PIRELLI_PRODUCT_1011                   0x1011
-#define PIRELLI_PRODUCT_1012                   0x1012
-
 /* Airplus products */
 #define AIRPLUS_VENDOR_ID                      0x1011
 #define AIRPLUS_PRODUCT_MCD650                 0x3198
@@ -373,16 +348,6 @@ static int  option_resume(struct usb_serial *serial);
 #define THINKWILL_VENDOR_ID    0x19f5
 #define THINKWILL_PRODUCT_ID   0x9909
 
-#define CINTERION_VENDOR_ID                    0x0681
-
-/* Olivetti products */
-#define OLIVETTI_VENDOR_ID                     0x0b3c
-#define OLIVETTI_PRODUCT_OLICARD100            0xc000
-
-/* Celot products */
-#define CELOT_VENDOR_ID                                0x211f
-#define CELOT_PRODUCT_CT680M                   0x6801
-
 static struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -480,10 +445,7 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
@@ -519,7 +481,6 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
-       { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) },
 
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) },             /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) },             /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
@@ -563,8 +524,7 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
-       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
-       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
+       { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
@@ -586,7 +546,6 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
@@ -598,52 +557,38 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
-       /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
-       /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0079, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
@@ -677,180 +622,6 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
@@ -859,8 +630,6 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
@@ -870,7 +639,6 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
-       { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) },
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) },
        { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
        { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
@@ -882,38 +650,6 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
        { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
        { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
-       /* Pirelli  */
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) },
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) },
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) },
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) },
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
-       { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-       { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
-       { USB_DEVICE(LEADCORE_VENDOR_ID, LEADCORE_PRODUCT_LC1808) },  //zzc
-       { USB_DEVICE(SC8800G_VENDOR_ID,SC8800G_PRODUCT_ID)},
-       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
-       { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
-
-// cmy:
-       { USB_DEVICE(0x0685, 0x6000) },
-       { USB_DEVICE(0x1E89, 0x1E16) },
-       { USB_DEVICE(0x7693, 0x0001) },
-       { USB_DEVICE(0x1D09, 0x4308) },
-       { USB_DEVICE(0x1234, 0x0033) },
-       { USB_DEVICE(0xFEED, 0x0001) },
-       { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0017) },
-
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1036,26 +772,12 @@ static int option_probe(struct usb_serial *serial,
                        const struct usb_device_id *id)
 {
        struct option_intf_private *data;
-
        /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
        if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
                serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
                serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
                return -ENODEV;
 
-       /* Bandrich modem and AT command interface is 0xff */
-       if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID ||
-               serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) &&
-               serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
-               return -ENODEV;
-
-       /* Don't bind network interfaces on Huawei K3765 & K4505 */
-       if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
-               (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
-                       serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) &&
-               serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
-               return -ENODEV;
-
        data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
index ecb17081e003e3bebab25e1eeb5bf9810b3f0964..9ec1a49e23622a6787d93ec3280f62ce14aac93e 100644 (file)
@@ -95,7 +95,6 @@ static struct usb_device_id id_table [] = {
        { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
        { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
-       { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
        { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
        { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
        { }                                     /* Terminating entry */
index 01bc64b3eef301978553e59d2099c3d14451b58c..d640dc951568dc705c3d7c2ae22ea7321d9afe11 100644 (file)
 #define CRESSI_VENDOR_ID       0x04b8
 #define CRESSI_EDY_PRODUCT_ID  0x0521
 
-/* Zeagle dive computer interface */
-#define ZEAGLE_VENDOR_ID       0x04b8
-#define ZEAGLE_N2ITION3_PRODUCT_ID     0x0522
-
 /* Sony, USB data cable for CMD-Jxx mobile phones */
 #define SONY_VENDOR_ID         0x054c
 #define SONY_QN3USB_PRODUCT_ID 0x0437
index 8ab4ab2231ddfb4da64b0b7f5e48f4d6dc6df9f5..7528b8d57f1cc9048001244f780524b8cca17690 100644 (file)
@@ -47,35 +47,6 @@ static struct usb_device_id id_table[] = {
        {USB_DEVICE(0x05c6, 0x9221)},   /* Generic Gobi QDL device */
        {USB_DEVICE(0x05c6, 0x9231)},   /* Generic Gobi QDL device */
        {USB_DEVICE(0x1f45, 0x0001)},   /* Unknown Gobi QDL device */
-       {USB_DEVICE(0x413c, 0x8185)},   /* Dell Gobi 2000 QDL device (N0218, VU936) */
-       {USB_DEVICE(0x413c, 0x8186)},   /* Dell Gobi 2000 Modem device (N0218, VU936) */
-       {USB_DEVICE(0x05c6, 0x9224)},   /* Sony Gobi 2000 QDL device (N0279, VU730) */
-       {USB_DEVICE(0x05c6, 0x9225)},   /* Sony Gobi 2000 Modem device (N0279, VU730) */
-       {USB_DEVICE(0x05c6, 0x9244)},   /* Samsung Gobi 2000 QDL device (VL176) */
-       {USB_DEVICE(0x05c6, 0x9245)},   /* Samsung Gobi 2000 Modem device (VL176) */
-       {USB_DEVICE(0x03f0, 0x241d)},   /* HP Gobi 2000 QDL device (VP412) */
-       {USB_DEVICE(0x03f0, 0x251d)},   /* HP Gobi 2000 Modem device (VP412) */
-       {USB_DEVICE(0x05c6, 0x9214)},   /* Acer Gobi 2000 QDL device (VP413) */
-       {USB_DEVICE(0x05c6, 0x9215)},   /* Acer Gobi 2000 Modem device (VP413) */
-       {USB_DEVICE(0x05c6, 0x9264)},   /* Asus Gobi 2000 QDL device (VR305) */
-       {USB_DEVICE(0x05c6, 0x9265)},   /* Asus Gobi 2000 Modem device (VR305) */
-       {USB_DEVICE(0x05c6, 0x9234)},   /* Top Global Gobi 2000 QDL device (VR306) */
-       {USB_DEVICE(0x05c6, 0x9235)},   /* Top Global Gobi 2000 Modem device (VR306) */
-       {USB_DEVICE(0x05c6, 0x9274)},   /* iRex Technologies Gobi 2000 QDL device (VR307) */
-       {USB_DEVICE(0x05c6, 0x9275)},   /* iRex Technologies Gobi 2000 Modem device (VR307) */
-       {USB_DEVICE(0x1199, 0x9000)},   /* Sierra Wireless Gobi 2000 QDL device (VT773) */
-       {USB_DEVICE(0x1199, 0x9001)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x1199, 0x9002)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x1199, 0x9003)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x1199, 0x9004)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x1199, 0x9005)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x1199, 0x9006)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x1199, 0x9007)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x1199, 0x9008)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x1199, 0x9009)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x1199, 0x900a)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
-       {USB_DEVICE(0x16d8, 0x8001)},   /* CMDTech Gobi 2000 QDL device (VU922) */
-       {USB_DEVICE(0x16d8, 0x8002)},   /* CMDTech Gobi 2000 Modem device (VU922) */
        { }                             /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 328578bdb455161bdcb9008317c2e8d8111e5bc1..5019325ba25dda93486a08757c68cba44cf0b4a0 100644 (file)
@@ -195,7 +195,6 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
 static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
        { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
-       { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
        { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
 
        { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
@@ -210,7 +209,6 @@ static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
        { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
        { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
-       { USB_DEVICE(0x1199, 0x0301) }, /* Sierra Wireless USB Dongle 250U */
        /* Sierra Wireless C597 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
        /* Sierra Wireless T598 */
@@ -569,17 +567,14 @@ static void sierra_indat_callback(struct urb *urb)
        } else {
                if (urb->actual_length) {
                        tty = tty_port_tty_get(&port->port);
-                       if (tty) {
-                               tty_buffer_request_room(tty,
-                                       urb->actual_length);
-                               tty_insert_flip_string(tty, data,
-                                       urb->actual_length);
-                               tty_flip_buffer_push(tty);
-
-                               tty_kref_put(tty);
-                               usb_serial_debug_data(debug, &port->dev,
-                                       __func__, urb->actual_length, data);
-                       }
+
+                       tty_buffer_request_room(tty, urb->actual_length);
+                       tty_insert_flip_string(tty, data, urb->actual_length);
+                       tty_flip_buffer_push(tty);
+
+                       tty_kref_put(tty);
+                       usb_serial_debug_data(debug, &port->dev, __func__,
+                               urb->actual_length, data);
                } else {
                        dev_dbg(&port->dev, "%s: empty read urb"
                                " received\n", __func__);
index c14087018887e62213cb75965c4e6fa9c519e403..ad1f9232292d264506b16cb5b5bd8a625dc2e88d 100644 (file)
@@ -249,7 +249,6 @@ static struct usb_serial_driver clie_3_5_device = {
        .throttle =             visor_throttle,
        .unthrottle =           visor_unthrottle,
        .attach =               clie_3_5_startup,
-       .release =              visor_release,
        .write =                visor_write,
        .write_room =           visor_write_room,
        .write_bulk_callback =  visor_write_bulk_callback,
index 38e3c3aa05a181a45101368bb681eb65b7df451d..4395c4100ec2096313933d029e8b25191eefe6c9 100644 (file)
@@ -120,7 +120,7 @@ static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
        }
        return result;
 }
-static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
+static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL);
 
 int sierra_ms_init(struct us_data *us)
 {
index 72150021cc79e3b663406b4c5b8b49dd2299da6c..c932f9053188b32857a6b331ad188b3d88b97706 100644 (file)
@@ -1147,8 +1147,8 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
                0 ),
 
 /* Reported by Jan Dumon <j.dumon@option.com>
- * These devices (wrongly) have a vendor-specific device descriptor.
- * These entries are needed so usb-storage can bind to their mass-storage
+ * This device (wrongly) has a vendor-specific device descriptor.
+ * The entry is needed so usb-storage can bind to it's mass-storage
  * interface as an interface driver */
 UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
                "Option",
@@ -1156,90 +1156,6 @@ UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
                US_SC_DEVICE, US_PR_DEVICE, NULL,
                0 ),
 
-UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000,
-               "Option",
-               "GI 0451 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000,
-               "Option",
-               "GI 0451 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000,
-               "Option",
-               "GI 0452 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000,
-               "Option",
-               "GI 0461 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000,
-               "Option",
-               "GI 0461 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000,
-               "Option",
-               "GI 033x SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000,
-               "Option",
-               "GI 033x SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000,
-               "Option",
-               "GI 033x SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
-               "Option",
-               "GI 070x SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000,
-               "Option",
-               "GI 1505 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000,
-               "Option",
-               "GI 1509 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000,
-               "Option",
-               "GI 1515 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000,
-               "Option",
-               "GI 1215 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
-UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
-               "Option",
-               "GI 1505 SD-Card",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               0 ),
-
 /* Reported by Ben Efros <ben@pc-doctor.com> */
 UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
                "Seagate",
@@ -1858,21 +1774,6 @@ UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
                US_SC_DEVICE, US_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
-/* Reported by Hans de Goede <hdegoede@redhat.com>
- * These Appotech controllers are found in Picture Frames, they provide a
- * (buggy) emulation of a cdrom drive which contains the windows software
- * Uploading of pictures happens over the corresponding /dev/sg device. */
-UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
-               "BUILDWIN",
-               "Photo Frame",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               US_FL_BAD_SENSE ),
-UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
-               "BUILDWIN",
-               "Photo Frame",
-               US_SC_DEVICE, US_PR_DEVICE, NULL,
-               US_FL_BAD_SENSE ),
-
 UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
                "ST",
                "2A",
index 5e20e6ec17197649b54db2f3f5cda0557dfdfd52..6615ac7fa60a6124e31257a2ce1b09cf871ed719 100644 (file)
@@ -196,12 +196,12 @@ static int backlight_suspend(struct device *dev, pm_message_t state)
 {
        struct backlight_device *bd = to_backlight_device(dev);
 
-       mutex_lock(&bd->ops_lock);
-       if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
+       if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
+               mutex_lock(&bd->ops_lock);
                bd->props.state |= BL_CORE_SUSPENDED;
                backlight_update_status(bd);
+               mutex_unlock(&bd->ops_lock);
        }
-       mutex_unlock(&bd->ops_lock);
 
        return 0;
 }
@@ -210,12 +210,12 @@ static int backlight_resume(struct device *dev)
 {
        struct backlight_device *bd = to_backlight_device(dev);
 
-       mutex_lock(&bd->ops_lock);
-       if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
+       if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
+               mutex_lock(&bd->ops_lock);
                bd->props.state &= ~BL_CORE_SUSPENDED;
                backlight_update_status(bd);
+               mutex_unlock(&bd->ops_lock);
        }
-       mutex_unlock(&bd->ops_lock);
 
        return 0;
 }
index 73ab600efbf8b161a16f74e968a402414c24e324..9edb8d7c295f38ed02a31642b3eba3be1c5681c9 100644 (file)
@@ -137,51 +137,6 @@ static int mbp_dmi_match(const struct dmi_system_id *id)
 }
 
 static const struct dmi_system_id __initdata mbp_device_table[] = {
-       {
-               .callback       = mbp_dmi_match,
-               .ident          = "MacBook 1,1",
-               .matches        = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
-               },
-               .driver_data    = (void *)&intel_chipset_data,
-       },
-       {
-               .callback       = mbp_dmi_match,
-               .ident          = "MacBook 2,1",
-               .matches        = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
-               },
-               .driver_data    = (void *)&intel_chipset_data,
-       },
-       {
-               .callback       = mbp_dmi_match,
-               .ident          = "MacBook 3,1",
-               .matches        = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
-               },
-               .driver_data    = (void *)&intel_chipset_data,
-       },
-       {
-               .callback       = mbp_dmi_match,
-               .ident          = "MacBook 4,1",
-               .matches        = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
-               },
-               .driver_data    = (void *)&intel_chipset_data,
-       },
-       {
-               .callback       = mbp_dmi_match,
-               .ident          = "MacBook 4,2",
-               .matches        = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
-               },
-               .driver_data    = (void *)&intel_chipset_data,
-       },
        {
                .callback       = mbp_dmi_match,
                .ident          = "MacBookPro 3,1",
index 2c72a7ca736607d5bd565b6bc287020616f1773d..5cc36cfbf07be275ce2b8ea92104c41e4bda1996 100644 (file)
@@ -515,9 +515,9 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
        fbinfo->fbops = &bfin_t350mcqb_fb_ops;
        fbinfo->flags = FBINFO_FLAG_DEFAULT;
 
-       info->fb_buffer = dma_alloc_coherent(NULL, fbinfo->fix.smem_len +
-                               ACTIVE_VIDEO_MEM_OFFSET,
-                               &info->dma_handle, GFP_KERNEL);
+       info->fb_buffer =
+           dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
+                              GFP_KERNEL);
 
        if (NULL == info->fb_buffer) {
                printk(KERN_ERR DRIVER_NAME
@@ -587,8 +587,8 @@ out7:
 out6:
        fb_dealloc_cmap(&fbinfo->cmap);
 out4:
-       dma_free_coherent(NULL, fbinfo->fix.smem_len + ACTIVE_VIDEO_MEM_OFFSET,
-                        info->fb_buffer, info->dma_handle);
+       dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
+                         info->dma_handle);
 out3:
        framebuffer_release(fbinfo);
 out2:
@@ -611,9 +611,8 @@ static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
        free_irq(info->irq, info);
 
        if (info->fb_buffer != NULL)
-               dma_free_coherent(NULL, fbinfo->fix.smem_len +
-                       ACTIVE_VIDEO_MEM_OFFSET, info->fb_buffer,
-                       info->dma_handle);
+               dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
+                                 info->dma_handle);
 
        fb_dealloc_cmap(&fbinfo->cmap);
 
index c0a446560ab406f0184a8571204c9b5ba50e4194..eb12182b20598861937b46496b8716efcae1e366 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
 #include <linux/dmi.h>
-#include <linux/pci.h>
+
 #include <video/vga.h>
 
 static struct fb_var_screeninfo efifb_defined __initdata = {
@@ -39,31 +39,16 @@ enum {
        M_I20,          /* 20-Inch iMac */
        M_I20_SR,       /* 20-Inch iMac (Santa Rosa) */
        M_I24,          /* 24-Inch iMac */
-       M_I24_8_1,      /* 24-Inch iMac, 8,1th gen */
-       M_I24_10_1,     /* 24-Inch iMac, 10,1th gen */
-       M_I27_11_1,     /* 27-Inch iMac, 11,1th gen */
        M_MINI,         /* Mac Mini */
-       M_MINI_3_1,     /* Mac Mini, 3,1th gen */
-       M_MINI_4_1,     /* Mac Mini, 4,1th gen */
        M_MB,           /* MacBook */
        M_MB_2,         /* MacBook, 2nd rev. */
        M_MB_3,         /* MacBook, 3rd rev. */
-       M_MB_5_1,       /* MacBook, 5th rev. */
-       M_MB_6_1,       /* MacBook, 6th rev. */
-       M_MB_7_1,       /* MacBook, 7th rev. */
        M_MB_SR,        /* MacBook, 2nd gen, (Santa Rosa) */
        M_MBA,          /* MacBook Air */
        M_MBP,          /* MacBook Pro */
        M_MBP_2,        /* MacBook Pro 2nd gen */
-       M_MBP_2_2,      /* MacBook Pro 2,2nd gen */
        M_MBP_SR,       /* MacBook Pro (Santa Rosa) */
        M_MBP_4,        /* MacBook Pro, 4th gen */
-       M_MBP_5_1,    /* MacBook Pro, 5,1th gen */
-       M_MBP_5_2,      /* MacBook Pro, 5,2th gen */
-       M_MBP_5_3,      /* MacBook Pro, 5,3rd gen */
-       M_MBP_6_1,      /* MacBook Pro, 6,1th gen */
-       M_MBP_6_2,      /* MacBook Pro, 6,2th gen */
-       M_MBP_7_1,      /* MacBook Pro, 7,1th gen */
        M_UNKNOWN       /* placeholder */
 };
 
@@ -78,28 +63,13 @@ static struct efifb_dmi_info {
        [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
        [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
        [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
-       [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
-       [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
-       [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
        [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
-       [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
-       [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
        [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
-       [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
-       [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
-       [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
        [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
        [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
        [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
-       [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
        [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
        [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
-       [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
-       [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
-       [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
-       [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
-       [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
-       [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
        [M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
 };
 
@@ -120,12 +90,7 @@ static struct dmi_system_id __initdata dmi_system_table[] = {
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
        /* At least one of these two will be right; maybe both? */
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -134,23 +99,13 @@ static struct dmi_system_id __initdata dmi_system_table[] = {
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
-       EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
-       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
        {},
 };
 
@@ -158,7 +113,7 @@ static int set_system(const struct dmi_system_id *id)
 {
        struct efifb_dmi_info *info = id->driver_data;
        if (info->base == 0)
-               return 0;
+               return -ENODEV;
 
        printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
                         "(%dx%d, stride %d)\n", id->ident,
@@ -166,55 +121,18 @@ static int set_system(const struct dmi_system_id *id)
                         info->stride);
 
        /* Trust the bootloader over the DMI tables */
-       if (screen_info.lfb_base == 0) {
-#if defined(CONFIG_PCI)
-               struct pci_dev *dev = NULL;
-               int found_bar = 0;
-#endif
+       if (screen_info.lfb_base == 0)
                screen_info.lfb_base = info->base;
+       if (screen_info.lfb_linelength == 0)
+               screen_info.lfb_linelength = info->stride;
+       if (screen_info.lfb_width == 0)
+               screen_info.lfb_width = info->width;
+       if (screen_info.lfb_height == 0)
+               screen_info.lfb_height = info->height;
+       if (screen_info.orig_video_isVGA == 0)
+               screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
 
-#if defined(CONFIG_PCI)
-               /* make sure that the address in the table is actually on a
-                * VGA device's PCI BAR */
-
-               for_each_pci_dev(dev) {
-                       int i;
-                       if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
-                               continue;
-                       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-                               resource_size_t start, end;
-
-                               start = pci_resource_start(dev, i);
-                               if (start == 0)
-                                       break;
-                               end = pci_resource_end(dev, i);
-                               if (screen_info.lfb_base >= start &&
-                                               screen_info.lfb_base < end) {
-                                       found_bar = 1;
-                               }
-                       }
-               }
-               if (!found_bar)
-                       screen_info.lfb_base = 0;
-#endif
-       }
-       if (screen_info.lfb_base) {
-               if (screen_info.lfb_linelength == 0)
-                       screen_info.lfb_linelength = info->stride;
-               if (screen_info.lfb_width == 0)
-                       screen_info.lfb_width = info->width;
-               if (screen_info.lfb_height == 0)
-                       screen_info.lfb_height = info->height;
-               if (screen_info.orig_video_isVGA == 0)
-                       screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
-       } else {
-               screen_info.lfb_linelength = 0;
-               screen_info.lfb_width = 0;
-               screen_info.lfb_height = 0;
-               screen_info.orig_video_isVGA = 0;
-               return 0;
-       }
-       return 1;
+       return 0;
 }
 
 static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -243,17 +161,8 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
        return 0;
 }
 
-static void efifb_destroy(struct fb_info *info)
-{
-       if (info->screen_base)
-               iounmap(info->screen_base);
-       release_mem_region(info->aperture_base, info->aperture_size);
-       framebuffer_release(info);
-}
-
 static struct fb_ops efifb_ops = {
        .owner          = THIS_MODULE,
-       .fb_destroy     = efifb_destroy,
        .fb_setcolreg   = efifb_setcolreg,
        .fb_fillrect    = cfb_fillrect,
        .fb_copyarea    = cfb_copyarea,
@@ -372,7 +281,7 @@ static int __init efifb_probe(struct platform_device *dev)
        info->par = NULL;
 
        info->aperture_base = efifb_fix.smem_start;
-       info->aperture_size = size_remap;
+       info->aperture_size = size_total;
 
        info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
        if (!info->screen_base) {
index b043ac83c41265b884bbe07e2510519be4ba91dd..4d8c54c23dd7e320289fadec313d52ec5f333392 100644 (file)
@@ -282,17 +282,8 @@ static int offb_set_par(struct fb_info *info)
        return 0;
 }
 
-static void offb_destroy(struct fb_info *info)
-{
-       if (info->screen_base)
-               iounmap(info->screen_base);
-       release_mem_region(info->aperture_base, info->aperture_size);
-       framebuffer_release(info);
-}
-
 static struct fb_ops offb_ops = {
        .owner          = THIS_MODULE,
-       .fb_destroy     = offb_destroy,
        .fb_setcolreg   = offb_setcolreg,
        .fb_set_par     = offb_set_par,
        .fb_blank       = offb_blank,
@@ -491,14 +482,10 @@ static void __init offb_init_fb(const char *name, const char *full_name,
        var->sync = 0;
        var->vmode = FB_VMODE_NONINTERLACED;
 
-       /* set offb aperture size for generic probing */
-       info->aperture_base = address;
-       info->aperture_size = fix->smem_len;
-
        info->fbops = &offb_ops;
        info->screen_base = ioremap(address, fix->smem_len);
        info->pseudo_palette = (void *) (info + 1);
-       info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian;
+       info->flags = FBINFO_DEFAULT | foreign_endian;
 
        fb_alloc_cmap(&info->cmap, 256, 0);
 
index e1836d7140a8cd10938d36f32a50fd8fa5b2581e..a4e05e4d7501256e5311a1a2fc7e808c3d05f1f5 100644 (file)
@@ -1701,9 +1701,6 @@ static int        sisfb_ioctl(struct fb_info *info, unsigned int cmd,
                break;
 
           case FBIOGET_VBLANK:
-
-               memset(&sisvbblank, 0, sizeof(struct fb_vblank));
-
                sisvbblank.count = 0;
                sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
 
index 3803745d6eee75068f072929c13db42f10eaa1eb..18b950706cad47f70ee83282a7cb7d905d902ef6 100644 (file)
@@ -242,27 +242,11 @@ static int __devinit e3d_set_fbinfo(struct e3d_info *ep)
 static int __devinit e3d_pci_register(struct pci_dev *pdev,
                                      const struct pci_device_id *ent)
 {
-       struct device_node *of_node;
-       const char *device_type;
        struct fb_info *info;
        struct e3d_info *ep;
        unsigned int line_length;
        int err;
 
-       of_node = pci_device_to_OF_node(pdev);
-       if (!of_node) {
-               printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
-                      pci_name(pdev));
-               return -ENODEV;
-       }
-
-       device_type = of_get_property(of_node, "device_type", NULL);
-       if (!device_type) {
-               printk(KERN_INFO "e3d: Ignoring secondary output device "
-                      "at %s\n", pci_name(pdev));
-               return -ENODEV;
-       }
-
        err = pci_enable_device(pdev);
        if (err < 0) {
                printk(KERN_ERR "e3d: Cannot enable PCI device %s\n",
@@ -281,7 +265,13 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev,
        ep->info = info;
        ep->pdev = pdev;
        spin_lock_init(&ep->lock);
-       ep->of_node = of_node;
+       ep->of_node = pci_device_to_OF_node(pdev);
+       if (!ep->of_node) {
+               printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
+                      pci_name(pdev));
+               err = -ENODEV;
+               goto err_release_fb;
+       }
 
        /* Read the PCI base register of the frame buffer, which we
         * need in order to interpret the RAMDAC_VID_*FB* values in
@@ -410,7 +400,6 @@ static void __devexit e3d_pci_unregister(struct pci_dev *pdev)
 
 static struct pci_device_id e3d_pci_table[] = {
        {       PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a0),        },
-       {       PCI_DEVICE(0x1091, 0x7a0),                      },
        {       PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a2),        },
        {       .vendor = PCI_VENDOR_ID_3DLABS,
                .device = PCI_ANY_ID,
index 12b5990bf456fdf7e04b9311cc2b3552b1127e68..9d4f3a49ba4a5038a333c940351dd85cf5df0deb 100644 (file)
@@ -277,12 +277,11 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
                writel(tmp, engine + 0x1C);
        }
 
-       if (op == VIA_BITBLT_FILL) {
-               writel(fg_color, engine + 0x58);
-       } else if (op == VIA_BITBLT_MONO) {
+       if (op != VIA_BITBLT_COLOR)
                writel(fg_color, engine + 0x4C);
+
+       if (op == VIA_BITBLT_MONO)
                writel(bg_color, engine + 0x50);
-       }
 
        if (op == VIA_BITBLT_FILL)
                ge_cmd |= fill_rop << 24 | 0x00002000 | 0x00000001;
index 4d553d0b8d7a450b9337a9b4375856c5b304217c..da03c074e32aad8b909c4b11c57a4c5a9f9d9285 100644 (file)
@@ -25,8 +25,6 @@ int viafb_ioctl_get_viafb_info(u_long arg)
 {
        struct viafb_ioctl_info viainfo;
 
-       memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
-
        viainfo.viafb_id = VIAID;
        viainfo.vendor_id = PCI_VIA_VENDOR_ID;
 
index 5a1dad2964d635d85d17d5fa907d2dd512b1dbb4..2376f688ec8b69addc7c4f0439bd62e127d8482b 100644 (file)
@@ -857,9 +857,9 @@ unsigned long w100fb_gpio_read(int port)
 void w100fb_gpio_write(int port, unsigned long value)
 {
        if (port==W100_GPIO_PORT_A)
-               writel(value, remapped_regs + mmGPIO_DATA);
+               value = writel(value, remapped_regs + mmGPIO_DATA);
        else
-               writel(value, remapped_regs + mmGPIO_DATA2);
+               value = writel(value, remapped_regs + mmGPIO_DATA2);
 }
 EXPORT_SYMBOL(w100fb_gpio_read);
 EXPORT_SYMBOL(w100fb_gpio_write);
index d43859f76db248a39a23d39cdb47d73aca8e5393..28d9cf7cf72f9fd1ee8aeea96085da394d48702e 100644 (file)
@@ -473,8 +473,7 @@ static void vp_del_vqs(struct virtio_device *vdev)
 
        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                info = vq->priv;
-               if (vp_dev->per_vq_vectors &&
-                       info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+               if (vp_dev->per_vq_vectors)
                        free_irq(vp_dev->msix_entries[info->msix_vector].vector,
                                 vq);
                vp_del_vq(vq);
@@ -635,9 +634,6 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
        INIT_LIST_HEAD(&vp_dev->virtqueues);
        spin_lock_init(&vp_dev->lock);
 
-       /* Disable MSI/MSIX to bring device to a known good state. */
-       pci_msi_off(pci_dev);
-
        /* enable the device */
        err = pci_enable_device(pci_dev);
        if (err)
index 17726a05a0a62849bcf09918b622b45fbd761351..1ed3d554e3728a1f7b6fb51501b1df52d26f514d 100644 (file)
@@ -115,8 +115,9 @@ static struct w1_therm_family_converter w1_therm_families[] = {
 
 static inline int w1_DS18B20_convert_temp(u8 rom[9])
 {
-       s16 t = le16_to_cpup((__le16 *)rom);
-       return t*1000/16;
+       int t = ((s16)rom[1] << 8) | rom[0];
+       t = t*1000/16;
+       return t;
 }
 
 static inline int w1_DS18S20_convert_temp(u8 rom[9])
index 2159e668751cd7db5e9ddfb39c96984573801539..c7b3f9df2317387401961450f9746ed093a97e27 100644 (file)
@@ -1,8 +1,9 @@
 /*
  * Blackfin On-Chip Watchdog Driver
+ *  Supports BF53[123]/BF53[467]/BF54[2489]/BF561
  *
  * Originally based on softdog.c
- * Copyright 2006-2010 Analog Devices Inc.
+ * Copyright 2006-2007 Analog Devices Inc.
  * Copyright 2006-2007 Michele d'Amico
  * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
@@ -136,15 +137,13 @@ static int bfin_wdt_running(void)
  */
 static int bfin_wdt_set_timeout(unsigned long t)
 {
-       u32 cnt, max_t, sclk;
+       u32 cnt;
        unsigned long flags;
 
-       sclk = get_sclk();
-       max_t = -1 / sclk;
-       cnt = t * sclk;
-       stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt);
+       stampit();
 
-       if (t > max_t) {
+       cnt = t * get_sclk();
+       if (cnt < get_sclk()) {
                printk(KERN_WARNING PFX "timeout value is too large\n");
                return -EINVAL;
        }
index 0b9190754e243760ca1e8c1c08c859fae3a35ad9..a6c5674c78e689414d1967d8b07fb90a28e12d3f 100644 (file)
@@ -443,7 +443,7 @@ static void hpwdt_ping(void)
 static int hpwdt_change_timer(int new_margin)
 {
        /* Arbitrary, can't find the card's limits */
-       if (new_margin < 5 || new_margin > 600) {
+       if (new_margin < 30 || new_margin > 600) {
                printk(KERN_WARNING
                        "hpwdt: New value passed in is invalid: %d seconds.\n",
                        new_margin);
index e2ebe084986b4e953bf9ff385c837ad0a7a5201c..4bdb7f1a90772d780037bf2456e3656c1ad6a1ae 100644 (file)
@@ -115,37 +115,8 @@ enum iTCO_chipsets {
        TCO_3420,       /* 3420 */
        TCO_3450,       /* 3450 */
        TCO_EP80579,    /* EP80579 */
-       TCO_CPT1,       /* Cougar Point */
-       TCO_CPT2,       /* Cougar Point Desktop */
-       TCO_CPT3,       /* Cougar Point Mobile */
-       TCO_CPT4,       /* Cougar Point */
-       TCO_CPT5,       /* Cougar Point */
-       TCO_CPT6,       /* Cougar Point */
-       TCO_CPT7,       /* Cougar Point */
-       TCO_CPT8,       /* Cougar Point */
-       TCO_CPT9,       /* Cougar Point */
-       TCO_CPT10,      /* Cougar Point */
-       TCO_CPT11,      /* Cougar Point */
-       TCO_CPT12,      /* Cougar Point */
-       TCO_CPT13,      /* Cougar Point */
-       TCO_CPT14,      /* Cougar Point */
-       TCO_CPT15,      /* Cougar Point */
-       TCO_CPT16,      /* Cougar Point */
-       TCO_CPT17,      /* Cougar Point */
-       TCO_CPT18,      /* Cougar Point */
-       TCO_CPT19,      /* Cougar Point */
-       TCO_CPT20,      /* Cougar Point */
-       TCO_CPT21,      /* Cougar Point */
-       TCO_CPT22,      /* Cougar Point */
-       TCO_CPT23,      /* Cougar Point */
-       TCO_CPT24,      /* Cougar Point */
-       TCO_CPT25,      /* Cougar Point */
-       TCO_CPT26,      /* Cougar Point */
-       TCO_CPT27,      /* Cougar Point */
-       TCO_CPT28,      /* Cougar Point */
-       TCO_CPT29,      /* Cougar Point */
-       TCO_CPT30,      /* Cougar Point */
-       TCO_CPT31,      /* Cougar Point */
+       TCO_CPTD,       /* CPT Desktop */
+       TCO_CPTM,       /* CPT Mobile */
 };
 
 static struct {
@@ -202,37 +173,8 @@ static struct {
        {"3420", 2},
        {"3450", 2},
        {"EP80579", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
-       {"Cougar Point", 2},
+       {"CPT Desktop", 2},
+       {"CPT Mobile", 2},
        {NULL, 0}
 };
 
@@ -317,37 +259,8 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
        { ITCO_PCI_DEVICE(0x3b14,                               TCO_3420)},
        { ITCO_PCI_DEVICE(0x3b16,                               TCO_3450)},
        { ITCO_PCI_DEVICE(0x5031,                               TCO_EP80579)},
-       { ITCO_PCI_DEVICE(0x1c41,                               TCO_CPT1)},
-       { ITCO_PCI_DEVICE(0x1c42,                               TCO_CPT2)},
-       { ITCO_PCI_DEVICE(0x1c43,                               TCO_CPT3)},
-       { ITCO_PCI_DEVICE(0x1c44,                               TCO_CPT4)},
-       { ITCO_PCI_DEVICE(0x1c45,                               TCO_CPT5)},
-       { ITCO_PCI_DEVICE(0x1c46,                               TCO_CPT6)},
-       { ITCO_PCI_DEVICE(0x1c47,                               TCO_CPT7)},
-       { ITCO_PCI_DEVICE(0x1c48,                               TCO_CPT8)},
-       { ITCO_PCI_DEVICE(0x1c49,                               TCO_CPT9)},
-       { ITCO_PCI_DEVICE(0x1c4a,                               TCO_CPT10)},
-       { ITCO_PCI_DEVICE(0x1c4b,                               TCO_CPT11)},
-       { ITCO_PCI_DEVICE(0x1c4c,                               TCO_CPT12)},
-       { ITCO_PCI_DEVICE(0x1c4d,                               TCO_CPT13)},
-       { ITCO_PCI_DEVICE(0x1c4e,                               TCO_CPT14)},
-       { ITCO_PCI_DEVICE(0x1c4f,                               TCO_CPT15)},
-       { ITCO_PCI_DEVICE(0x1c50,                               TCO_CPT16)},
-       { ITCO_PCI_DEVICE(0x1c51,                               TCO_CPT17)},
-       { ITCO_PCI_DEVICE(0x1c52,                               TCO_CPT18)},
-       { ITCO_PCI_DEVICE(0x1c53,                               TCO_CPT19)},
-       { ITCO_PCI_DEVICE(0x1c54,                               TCO_CPT20)},
-       { ITCO_PCI_DEVICE(0x1c55,                               TCO_CPT21)},
-       { ITCO_PCI_DEVICE(0x1c56,                               TCO_CPT22)},
-       { ITCO_PCI_DEVICE(0x1c57,                               TCO_CPT23)},
-       { ITCO_PCI_DEVICE(0x1c58,                               TCO_CPT24)},
-       { ITCO_PCI_DEVICE(0x1c59,                               TCO_CPT25)},
-       { ITCO_PCI_DEVICE(0x1c5a,                               TCO_CPT26)},
-       { ITCO_PCI_DEVICE(0x1c5b,                               TCO_CPT27)},
-       { ITCO_PCI_DEVICE(0x1c5c,                               TCO_CPT28)},
-       { ITCO_PCI_DEVICE(0x1c5d,                               TCO_CPT29)},
-       { ITCO_PCI_DEVICE(0x1c5e,                               TCO_CPT30)},
-       { ITCO_PCI_DEVICE(0x1c5f,                               TCO_CPT31)},
+       { ITCO_PCI_DEVICE(0x1c42,                               TCO_CPTD)},
+       { ITCO_PCI_DEVICE(0x1c43,                               TCO_CPTM)},
        { 0, },                 /* End of list */
 };
 MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
index 141701598b3ec8d5def730020d805e82f4e8ccf5..ce602dd09bc18c60381c68f0f1310bb168817bed 100644 (file)
@@ -106,7 +106,6 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
 #define VALID_EVTCHN(chn)      ((chn) != 0)
 
 static struct irq_chip xen_dynamic_chip;
-static struct irq_chip xen_percpu_chip;
 
 /* Constructor for packed IRQ information. */
 static struct irq_info mk_unbound_info(void)
@@ -255,7 +254,7 @@ static void init_evtchn_cpu_bindings(void)
        }
 #endif
 
-       memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
+       memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
 }
 
 static inline void clear_evtchn(int port)
@@ -363,7 +362,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                irq = find_unbound_irq();
 
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                             handle_edge_irq, "event");
+                                             handle_level_irq, "event");
 
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
@@ -389,8 +388,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                if (irq < 0)
                        goto out;
 
-               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
-                                             handle_percpu_irq, "ipi");
+               set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+                                             handle_level_irq, "ipi");
 
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -430,8 +429,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
                irq = find_unbound_irq();
 
-               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
-                                             handle_percpu_irq, "virq");
+               set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+                                             handle_level_irq, "virq");
 
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -536,7 +535,6 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
        if (irq < 0)
                return irq;
 
-       irqflags |= IRQF_NO_SUSPEND;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
@@ -930,16 +928,6 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
        .retrigger      = retrigger_dynirq,
 };
 
-static struct irq_chip xen_percpu_chip __read_mostly = {
-       .name           = "xen-percpu",
-
-       .disable        = disable_dynirq,
-       .mask           = disable_dynirq,
-       .unmask         = enable_dynirq,
-
-       .ack            = ack_dynirq,
-};
-
 void __init xen_init_IRQ(void)
 {
        int i;
index 7b547f53f65ee9267f19dd493a6ab7321310ed4c..eab33f1dbdf7013f451af491b882973ea017f718 100644 (file)
@@ -499,7 +499,7 @@ int xenbus_printf(struct xenbus_transaction t,
 #define PRINTF_BUFFER_SIZE 4096
        char *printf_buffer;
 
-       printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
+       printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
        if (printf_buffer == NULL)
                return -ENOMEM;
 
index aeabd95f698fdda0db494fbd0d99248409c2912a..f4ca0c7eb51cb12899132cb755679d9b362a6b18 100644 (file)
@@ -140,7 +140,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
 fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
 
 # Directories which we _might_ need to create, so we have a rule for them.
-firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all))))
+firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all))))
 
 quiet_cmd_mkdir = MKDIR   $(patsubst $(objtree)/%,%,$@)
       cmd_mkdir = mkdir -p $@
index 5fb43bd19688a11e5c55a6ca1b00713344a93daa..3902bf43a0883bfe4b92d384170362e53c910e0d 100644 (file)
@@ -114,7 +114,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
        P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
 
        /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
+       if (__mandatory_lock(inode))
                return -ENOLCK;
 
        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
index b84a7695358dd2a4cb8afcd03a8b07a07e407eac..02a2c9340573cf5946e0083187353d4517319844 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1639,9 +1639,6 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
        if (unlikely(nr < 0))
                return -EINVAL;
 
-       if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
-               nr = LONG_MAX/sizeof(*iocbpp);
-
        if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
                return -EFAULT;
 
index 42b60b04ea061fc01419b2330899f307e4bed895..c4e83537ead77501f03d7fcb3a748727f2624d5e 100644 (file)
@@ -723,7 +723,7 @@ static int __init init_misc_binfmt(void)
 {
        int err = register_filesystem(&bm_fs_type);
        if (!err) {
-               err = insert_binfmt(&misc_format);
+               err = register_binfmt(&misc_format);
                if (err)
                        unregister_filesystem(&bm_fs_type);
        }
index e696713687c5a5d50790e306e6824fc392fa6f4a..e0c9e71cc40422db3051e09741e10d1a9e738661 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -371,9 +371,6 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
 {
        struct bio *bio;
 
-       if (nr_iovecs > UIO_MAXIOV)
-               return NULL;
-
        bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
                      gfp_mask);
        if (unlikely(!bio))
@@ -704,12 +701,8 @@ static void bio_free_map_data(struct bio_map_data *bmd)
 static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
                                               gfp_t gfp_mask)
 {
-       struct bio_map_data *bmd;
+       struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
 
-       if (iov_count > UIO_MAXIOV)
-               return NULL;
-
-       bmd = kmalloc(sizeof(*bmd), gfp_mask);
        if (!bmd)
                return NULL;
 
@@ -838,12 +831,6 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
                end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                start = uaddr >> PAGE_SHIFT;
 
-               /*
-                * Overflow, abort
-                */
-               if (end < start)
-                       return ERR_PTR(-EINVAL);
-
                nr_pages += end - start;
                len += iov[i].iov_len;
        }
@@ -971,12 +958,6 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;
 
-               /*
-                * Overflow, abort
-                */
-               if (end < start)
-                       return ERR_PTR(-EINVAL);
-
                nr_pages += end - start;
                /*
                 * buffer must be aligned to at least hardsector size for now
@@ -1004,7 +985,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
                unsigned long start = uaddr >> PAGE_SHIFT;
                const int local_nr_pages = end - start;
                const int page_limit = cur_page + local_nr_pages;
-
+               
                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                write_to_vm, &pages[cur_page]);
                if (ret < local_nr_pages) {
index 4e777ad0c92dba61d124013d5d926199dc49cd8f..fe03d10bc8b5f4e459d29eeeb9457ab70051d35b 100755 (executable)
@@ -406,7 +406,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
  *     NULL first argument is nfsd_sync_dir() and that's not a directory.
  */
  
-int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
+static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
 {
        return sync_blockdev(I_BDEV(filp->f_mapping->host));
 }
@@ -425,7 +425,6 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
                return NULL;
        return &ei->vfs_inode;
 }
-EXPORT_SYMBOL(block_fsync);
 
 static void bdev_destroy_inode(struct inode *inode)
 {
@@ -1177,12 +1176,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
        /*
         * hooks: /n/, see "layering violations".
         */
-       if (!for_part) {
-               ret = devcgroup_inode_permission(bdev->bd_inode, perm);
-               if (ret != 0) {
-                       bdput(bdev);
-                       return ret;
-               }
+       ret = devcgroup_inode_permission(bdev->bd_inode, perm);
+       if (ret != 0) {
+               bdput(bdev);
+               return ret;
        }
 
        lock_kernel();
index 12d7be8df561dfd93c0a3c86f7c6c7f44887dab8..36160424427124947179e7270bfb77e03dbc1686 100644 (file)
@@ -94,8 +94,7 @@ static int btrfs_xattr_get_acl(struct inode *inode, int type,
 /*
  * Needs to be called with fs_mutex held
  */
-static int btrfs_set_acl(struct btrfs_trans_handle *trans,
-                        struct inode *inode, struct posix_acl *acl, int type)
+static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
        int ret, size = 0;
        const char *name;
@@ -112,14 +111,12 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
        switch (type) {
        case ACL_TYPE_ACCESS:
                mode = inode->i_mode;
-               name = POSIX_ACL_XATTR_ACCESS;
-               if (acl) {
-                       ret = posix_acl_equiv_mode(acl, &mode);
-                       if (ret < 0)
-                               return ret;
-                       inode->i_mode = mode;
-               }
+               ret = posix_acl_equiv_mode(acl, &mode);
+               if (ret < 0)
+                       return ret;
                ret = 0;
+               inode->i_mode = mode;
+               name = POSIX_ACL_XATTR_ACCESS;
                break;
        case ACL_TYPE_DEFAULT:
                if (!S_ISDIR(inode->i_mode))
@@ -143,7 +140,8 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
                        goto out;
        }
 
-       ret = __btrfs_setxattr(trans, inode, name, value, size, 0);
+       ret = __btrfs_setxattr(inode, name, value, size, 0);
+
 out:
        kfree(value);
 
@@ -156,12 +154,9 @@ out:
 static int btrfs_xattr_set_acl(struct inode *inode, int type,
                               const void *value, size_t size)
 {
-       int ret;
+       int ret = 0;
        struct posix_acl *acl = NULL;
 
-       if (!is_owner_or_cap(inode))
-               return -EPERM;
-
        if (value) {
                acl = posix_acl_from_xattr(value, size);
                if (acl == NULL) {
@@ -172,7 +167,7 @@ static int btrfs_xattr_set_acl(struct inode *inode, int type,
                }
        }
 
-       ret = btrfs_set_acl(NULL, inode, acl, type);
+       ret = btrfs_set_acl(inode, acl, type);
 
        posix_acl_release(acl);
 
@@ -226,8 +221,7 @@ int btrfs_check_acl(struct inode *inode, int mask)
  * stuff has been fixed to work with that.  If the locking stuff changes, we
  * need to re-evaluate the acl locking stuff.
  */
-int btrfs_init_acl(struct btrfs_trans_handle *trans,
-                  struct inode *inode, struct inode *dir)
+int btrfs_init_acl(struct inode *inode, struct inode *dir)
 {
        struct posix_acl *acl = NULL;
        int ret = 0;
@@ -252,8 +246,7 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
                mode_t mode;
 
                if (S_ISDIR(inode->i_mode)) {
-                       ret = btrfs_set_acl(trans, inode, acl,
-                                           ACL_TYPE_DEFAULT);
+                       ret = btrfs_set_acl(inode, acl, ACL_TYPE_DEFAULT);
                        if (ret)
                                goto failed;
                }
@@ -268,11 +261,10 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
                        inode->i_mode = mode;
                        if (ret > 0) {
                                /* we need an acl */
-                               ret = btrfs_set_acl(trans, inode, clone,
+                               ret = btrfs_set_acl(inode, clone,
                                                    ACL_TYPE_ACCESS);
                        }
                }
-               posix_acl_release(clone);
        }
 failed:
        posix_acl_release(acl);
@@ -302,7 +294,7 @@ int btrfs_acl_chmod(struct inode *inode)
 
        ret = posix_acl_chmod_masq(clone, inode->i_mode);
        if (!ret)
-               ret = btrfs_set_acl(NULL, inode, clone, ACL_TYPE_ACCESS);
+               ret = btrfs_set_acl(inode, clone, ACL_TYPE_ACCESS);
 
        posix_acl_release(clone);
 
@@ -328,8 +320,7 @@ int btrfs_acl_chmod(struct inode *inode)
        return 0;
 }
 
-int btrfs_init_acl(struct btrfs_trans_handle *trans,
-                  struct inode *inode, struct inode *dir)
+int btrfs_init_acl(struct inode *inode, struct inode *dir)
 {
        return 0;
 }
index 3f1f50d9d916cf5e3c095101a3af0c220035f625..f6783a42f010965e344fddc68c2689a7d4a9fc14 100644 (file)
@@ -44,6 +44,9 @@ struct btrfs_inode {
         */
        struct extent_io_tree io_failure_tree;
 
+       /* held while inesrting or deleting extents from files */
+       struct mutex extent_mutex;
+
        /* held while logging the inode in tree-log.c */
        struct mutex log_mutex;
 
@@ -163,7 +166,7 @@ static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
 
 static inline void btrfs_i_size_write(struct inode *inode, u64 size)
 {
-       i_size_write(inode, size);
+       inode->i_size = size;
        BTRFS_I(inode)->disk_i_size = size;
 }
 
index c4bc570a396eebf5ede775dc39b004c9ff493227..ec96f3a6d536640919dd25a08c7ed22e4423ef15 100644 (file)
@@ -37,11 +37,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct extent_buffer *src_buf);
 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                   struct btrfs_path *path, int level, int slot);
-static int setup_items_for_insert(struct btrfs_trans_handle *trans,
-                       struct btrfs_root *root, struct btrfs_path *path,
-                       struct btrfs_key *cpu_key, u32 *data_size,
-                       u32 total_data, u32 total_size, int nr);
-
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
@@ -456,8 +451,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                extent_buffer_get(cow);
                spin_unlock(&root->node_lock);
 
-               btrfs_free_tree_block(trans, root, buf->start, buf->len,
-                               parent_start, root->root_key.objectid, level);
+               btrfs_free_extent(trans, root, buf->start, buf->len,
+                                 parent_start, root->root_key.objectid,
+                                 level, 0);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
@@ -472,8 +468,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                btrfs_set_node_ptr_generation(parent, parent_slot,
                                              trans->transid);
                btrfs_mark_buffer_dirty(parent);
-               btrfs_free_tree_block(trans, root, buf->start, buf->len,
-                               parent_start, root->root_key.objectid, level);
+               btrfs_free_extent(trans, root, buf->start, buf->len,
+                                 parent_start, root->root_key.objectid,
+                                 level, 0);
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
@@ -1033,8 +1030,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                btrfs_tree_unlock(mid);
                /* once for the path */
                free_extent_buffer(mid);
-               ret = btrfs_free_tree_block(trans, root, mid->start, mid->len,
-                                           0, root->root_key.objectid, level);
+               ret = btrfs_free_extent(trans, root, mid->start, mid->len,
+                                       0, root->root_key.objectid, level, 1);
                /* once for the root ptr */
                free_extent_buffer(mid);
                return ret;
@@ -1098,10 +1095,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                                       1);
                        if (wret)
                                ret = wret;
-                       wret = btrfs_free_tree_block(trans, root,
-                                                    bytenr, blocksize, 0,
-                                                    root->root_key.objectid,
-                                                    level);
+                       wret = btrfs_free_extent(trans, root, bytenr,
+                                                blocksize, 0,
+                                                root->root_key.objectid,
+                                                level, 0);
                        if (wret)
                                ret = wret;
                } else {
@@ -1146,8 +1143,9 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                wret = del_ptr(trans, root, path, level + 1, pslot);
                if (wret)
                        ret = wret;
-               wret = btrfs_free_tree_block(trans, root, bytenr, blocksize,
-                                        0, root->root_key.objectid, level);
+               wret = btrfs_free_extent(trans, root, bytenr, blocksize,
+                                        0, root->root_key.objectid,
+                                        level, 0);
                if (wret)
                        ret = wret;
        } else {
@@ -2999,85 +2997,75 @@ again:
        return ret;
 }
 
-static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
-                                        struct btrfs_root *root,
-                                        struct btrfs_path *path, int ins_len)
+/*
+ * This function splits a single item into two items,
+ * giving 'new_key' to the new item and splitting the
+ * old one at split_offset (from the start of the item).
+ *
+ * The path may be released by this operation.  After
+ * the split, the path is pointing to the old item.  The
+ * new item is going to be in the same node as the old one.
+ *
+ * Note, the item being split must be smaller enough to live alone on
+ * a tree block with room for one extra struct btrfs_item
+ *
+ * This allows us to split the item in place, keeping a lock on the
+ * leaf the entire time.
+ */
+int btrfs_split_item(struct btrfs_trans_handle *trans,
+                    struct btrfs_root *root,
+                    struct btrfs_path *path,
+                    struct btrfs_key *new_key,
+                    unsigned long split_offset)
 {
-       struct btrfs_key key;
-       struct extent_buffer *leaf;
-       struct btrfs_file_extent_item *fi;
-       u64 extent_len = 0;
        u32 item_size;
-       int ret;
+       struct extent_buffer *leaf;
+       struct btrfs_key orig_key;
+       struct btrfs_item *item;
+       struct btrfs_item *new_item;
+       int ret = 0;
+       int slot;
+       u32 nritems;
+       u32 orig_offset;
+       struct btrfs_disk_key disk_key;
+       char *buf;
 
        leaf = path->nodes[0];
-       btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-
-       BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
-              key.type != BTRFS_EXTENT_CSUM_KEY);
-
-       if (btrfs_leaf_free_space(root, leaf) >= ins_len)
-               return 0;
+       btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
+       if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
+               goto split;
 
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-       if (key.type == BTRFS_EXTENT_DATA_KEY) {
-               fi = btrfs_item_ptr(leaf, path->slots[0],
-                                   struct btrfs_file_extent_item);
-               extent_len = btrfs_file_extent_num_bytes(leaf, fi);
-       }
        btrfs_release_path(root, path);
 
-       path->keep_locks = 1;
        path->search_for_split = 1;
-       ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+       path->keep_locks = 1;
+
+       ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
        path->search_for_split = 0;
-       if (ret < 0)
-               goto err;
 
-       ret = -EAGAIN;
-       leaf = path->nodes[0];
        /* if our item isn't there or got smaller, return now */
-       if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
-               goto err;
-
-       if (key.type == BTRFS_EXTENT_DATA_KEY) {
-               fi = btrfs_item_ptr(leaf, path->slots[0],
-                                   struct btrfs_file_extent_item);
-               if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
-                       goto err;
+       if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
+                                                       path->slots[0])) {
+               path->keep_locks = 0;
+               return -EAGAIN;
        }
 
        btrfs_set_path_blocking(path);
-       ret = split_leaf(trans, root, &key, path, ins_len, 1);
+       ret = split_leaf(trans, root, &orig_key, path,
+                        sizeof(struct btrfs_item), 1);
+       path->keep_locks = 0;
        BUG_ON(ret);
 
-       path->keep_locks = 0;
        btrfs_unlock_up_safe(path, 1);
-       return 0;
-err:
-       path->keep_locks = 0;
-       return ret;
-}
-
-static noinline int split_item(struct btrfs_trans_handle *trans,
-                              struct btrfs_root *root,
-                              struct btrfs_path *path,
-                              struct btrfs_key *new_key,
-                              unsigned long split_offset)
-{
-       struct extent_buffer *leaf;
-       struct btrfs_item *item;
-       struct btrfs_item *new_item;
-       int slot;
-       char *buf;
-       u32 nritems;
-       u32 item_size;
-       u32 orig_offset;
-       struct btrfs_disk_key disk_key;
-
        leaf = path->nodes[0];
        BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
 
+split:
+       /*
+        * make sure any changes to the path from split_leaf leave it
+        * in a blocking state
+        */
        btrfs_set_path_blocking(path);
 
        item = btrfs_item_nr(leaf, path->slots[0]);
@@ -3085,19 +3073,19 @@ static noinline int split_item(struct btrfs_trans_handle *trans,
        item_size = btrfs_item_size(leaf, item);
 
        buf = kmalloc(item_size, GFP_NOFS);
-       if (!buf)
-               return -ENOMEM;
-
        read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
                            path->slots[0]), item_size);
-
        slot = path->slots[0] + 1;
+       leaf = path->nodes[0];
+
        nritems = btrfs_header_nritems(leaf);
+
        if (slot != nritems) {
                /* shift the items */
                memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
-                               btrfs_item_nr_offset(slot),
-                               (nritems - slot) * sizeof(struct btrfs_item));
+                             btrfs_item_nr_offset(slot),
+                             (nritems - slot) * sizeof(struct btrfs_item));
+
        }
 
        btrfs_cpu_key_to_disk(&disk_key, new_key);
@@ -3125,80 +3113,15 @@ static noinline int split_item(struct btrfs_trans_handle *trans,
                            item_size - split_offset);
        btrfs_mark_buffer_dirty(leaf);
 
-       BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
+       ret = 0;
+       if (btrfs_leaf_free_space(root, leaf) < 0) {
+               btrfs_print_leaf(root, leaf);
+               BUG();
+       }
        kfree(buf);
-       return 0;
-}
-
-/*
- * This function splits a single item into two items,
- * giving 'new_key' to the new item and splitting the
- * old one at split_offset (from the start of the item).
- *
- * The path may be released by this operation.  After
- * the split, the path is pointing to the old item.  The
- * new item is going to be in the same node as the old one.
- *
- * Note, the item being split must be smaller enough to live alone on
- * a tree block with room for one extra struct btrfs_item
- *
- * This allows us to split the item in place, keeping a lock on the
- * leaf the entire time.
- */
-int btrfs_split_item(struct btrfs_trans_handle *trans,
-                    struct btrfs_root *root,
-                    struct btrfs_path *path,
-                    struct btrfs_key *new_key,
-                    unsigned long split_offset)
-{
-       int ret;
-       ret = setup_leaf_for_split(trans, root, path,
-                                  sizeof(struct btrfs_item));
-       if (ret)
-               return ret;
-
-       ret = split_item(trans, root, path, new_key, split_offset);
        return ret;
 }
 
-/*
- * This function duplicate a item, giving 'new_key' to the new item.
- * It guarantees both items live in the same tree leaf and the new item
- * is contiguous with the original item.
- *
- * This allows us to split file extent in place, keeping a lock on the
- * leaf the entire time.
- */
-int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
-                        struct btrfs_root *root,
-                        struct btrfs_path *path,
-                        struct btrfs_key *new_key)
-{
-       struct extent_buffer *leaf;
-       int ret;
-       u32 item_size;
-
-       leaf = path->nodes[0];
-       item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-       ret = setup_leaf_for_split(trans, root, path,
-                                  item_size + sizeof(struct btrfs_item));
-       if (ret)
-               return ret;
-
-       path->slots[0]++;
-       ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
-                                    item_size, item_size +
-                                    sizeof(struct btrfs_item), 1);
-       BUG_ON(ret);
-
-       leaf = path->nodes[0];
-       memcpy_extent_buffer(leaf,
-                            btrfs_item_ptr_offset(leaf, path->slots[0]),
-                            btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
-                            item_size);
-       return 0;
-}
-
 /*
  * make the item pointed to by the path smaller.  new_size indicates
  * how small to make it, and from_end tells us if we just chop bytes
@@ -3791,8 +3714,8 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
         */
        btrfs_unlock_up_safe(path, 0);
 
-       ret = btrfs_free_tree_block(trans, root, leaf->start, leaf->len,
-                                   0, root->root_key.objectid, 0);
+       ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
+                               0, root->root_key.objectid, 0, 0);
        return ret;
 }
 /*
index 9f806dd04c2704899bf28e5324eebaa14a923e27..444b3e9b92a4b88391fd610a76346530436baa01 100644 (file)
@@ -310,9 +310,6 @@ struct btrfs_header {
 #define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
                                        sizeof(struct btrfs_item) - \
                                        sizeof(struct btrfs_file_extent_item))
-#define BTRFS_MAX_XATTR_SIZE(r)        (BTRFS_LEAF_DATA_SIZE(r) - \
-                                sizeof(struct btrfs_item) -\
-                                sizeof(struct btrfs_dir_item))
 
 
 /*
@@ -862,9 +859,8 @@ struct btrfs_fs_info {
        struct mutex ordered_operations_mutex;
        struct rw_semaphore extent_commit_sem;
 
-       struct rw_semaphore cleanup_work_sem;
-
        struct rw_semaphore subvol_sem;
+
        struct srcu_struct subvol_srcu;
 
        struct list_head trans_list;
@@ -872,9 +868,6 @@ struct btrfs_fs_info {
        struct list_head dead_roots;
        struct list_head caching_block_groups;
 
-       spinlock_t delayed_iput_lock;
-       struct list_head delayed_iputs;
-
        atomic_t nr_async_submits;
        atomic_t async_submit_draining;
        atomic_t nr_async_bios;
@@ -1041,12 +1034,12 @@ struct btrfs_root {
        int ref_cows;
        int track_dirty;
        int in_radix;
-       int clean_orphans;
 
        u64 defrag_trans_start;
        struct btrfs_key defrag_progress;
        struct btrfs_key defrag_max;
        int defrag_running;
+       int defrag_level;
        char *name;
        int in_sysfs;
 
@@ -1982,10 +1975,6 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
                                        u64 hint, u64 empty_size);
-int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
-                         struct btrfs_root *root,
-                         u64 bytenr, u32 blocksize,
-                         u64 parent, u64 root_objectid, int level);
 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize,
@@ -2100,10 +2089,6 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
                     struct btrfs_path *path,
                     struct btrfs_key *new_key,
                     unsigned long split_offset);
-int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
-                        struct btrfs_root *root,
-                        struct btrfs_path *path,
-                        struct btrfs_key *new_key);
 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_key *key, struct btrfs_path *p, int
                      ins_len, int cow);
@@ -2211,10 +2196,9 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
                              struct btrfs_path *path,
                              struct btrfs_dir_item *di);
 int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
-                           struct btrfs_root *root,
-                           struct btrfs_path *path, u64 objectid,
-                           const char *name, u16 name_len,
-                           const void *data, u16 data_len);
+                           struct btrfs_root *root, const char *name,
+                           u16 name_len, const void *data, u16 data_len,
+                           u64 dir);
 struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path, u64 dir,
@@ -2308,7 +2292,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                               struct inode *inode, u64 new_size,
                               u32 min_type);
 
-int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
+int btrfs_start_delalloc_inodes(struct btrfs_root *root);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end);
 int btrfs_writepages(struct address_space *mapping,
                     struct writeback_control *wbc);
@@ -2348,8 +2332,6 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
 void btrfs_orphan_cleanup(struct btrfs_root *root);
 int btrfs_cont_expand(struct inode *inode, loff_t size);
 int btrfs_invalidate_inodes(struct btrfs_root *root);
-void btrfs_add_delayed_iput(struct inode *inode);
-void btrfs_run_delayed_iputs(struct btrfs_root *root);
 extern const struct dentry_operations btrfs_dentry_operations;
 
 /* ioctl.c */
@@ -2363,9 +2345,12 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                            int skip_pinned);
 int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
 extern const struct file_operations btrfs_file_operations;
-int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
-                      u64 start, u64 end, u64 *hint_byte, int drop_cache);
+int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+                      struct btrfs_root *root, struct inode *inode,
+                      u64 start, u64 end, u64 locked_end,
+                      u64 inline_limit, u64 *hint_block, int drop_cache);
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root,
                              struct inode *inode, u64 start, u64 end);
 int btrfs_release_file(struct inode *inode, struct file *file);
 
@@ -2395,8 +2380,7 @@ int btrfs_check_acl(struct inode *inode, int mask);
 #else
 #define btrfs_check_acl NULL
 #endif
-int btrfs_init_acl(struct btrfs_trans_handle *trans,
-                  struct inode *inode, struct inode *dir);
+int btrfs_init_acl(struct inode *inode, struct inode *dir);
 int btrfs_acl_chmod(struct inode *inode);
 
 /* relocation.c */
index e9103b3baa49f4309c94eca9e0d7b7069665359c..f3a6075519ccc1d96e42157f95769a5ad6641a12 100644 (file)
@@ -68,12 +68,12 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
  * into the tree
  */
 int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
-                           struct btrfs_root *root,
-                           struct btrfs_path *path, u64 objectid,
-                           const char *name, u16 name_len,
-                           const void *data, u16 data_len)
+                           struct btrfs_root *root, const char *name,
+                           u16 name_len, const void *data, u16 data_len,
+                           u64 dir)
 {
        int ret = 0;
+       struct btrfs_path *path;
        struct btrfs_dir_item *dir_item;
        unsigned long name_ptr, data_ptr;
        struct btrfs_key key, location;
@@ -81,11 +81,15 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
        struct extent_buffer *leaf;
        u32 data_size;
 
-       BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root));
-
-       key.objectid = objectid;
+       key.objectid = dir;
        btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
        key.offset = btrfs_name_hash(name, name_len);
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+       if (name_len + data_len + sizeof(struct btrfs_dir_item) >
+           BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item))
+               return -ENOSPC;
 
        data_size = sizeof(*dir_item) + name_len + data_len;
        dir_item = insert_with_overflow(trans, root, path, &key, data_size,
@@ -113,6 +117,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
        write_extent_buffer(leaf, data, data_ptr, data_len);
        btrfs_mark_buffer_dirty(path->nodes[0]);
 
+       btrfs_free_path(path);
        return ret;
 }
 
index 2b59201b955ca533bcb10a251df07ff2d7c549d2..02b6afbd745020fb51bf43d88c93025459687e12 100644 (file)
@@ -892,8 +892,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;
-       root->in_radix = 0;
-       root->clean_orphans = 0;
 
        root->fs_info = fs_info;
        root->objectid = objectid;
@@ -930,6 +928,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
+       root->defrag_level = 0;
        root->root_key.objectid = objectid;
        root->anon_super.s_root = NULL;
        root->anon_super.s_dev = 0;
@@ -981,12 +980,12 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
 
        while (1) {
                ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
-                               0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
+                                   0, &start, &end, EXTENT_DIRTY);
                if (ret)
                        break;
 
-               clear_extent_bits(&log_root_tree->dirty_log_pages, start, end,
-                                 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
+               clear_extent_dirty(&log_root_tree->dirty_log_pages,
+                                  start, end, GFP_NOFS);
        }
        eb = fs_info->log_root_tree->node;
 
@@ -1211,10 +1210,8 @@ again:
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
-       if (ret == 0) {
+       if (ret == 0)
                root->in_radix = 1;
-               root->clean_orphans = 1;
-       }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        radix_tree_preload_end();
        if (ret) {
@@ -1228,6 +1225,10 @@ again:
        ret = btrfs_find_dead_roots(fs_info->tree_root,
                                    root->root_key.objectid);
        WARN_ON(ret);
+
+       if (!(fs_info->sb->s_flags & MS_RDONLY))
+               btrfs_orphan_cleanup(root);
+
        return root;
 fail:
        free_fs_root(root);
@@ -1476,7 +1477,6 @@ static int cleaner_kthread(void *arg)
 
                if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
                    mutex_trylock(&root->fs_info->cleaner_mutex)) {
-                       btrfs_run_delayed_iputs(root);
                        btrfs_clean_old_snapshots(root);
                        mutex_unlock(&root->fs_info->cleaner_mutex);
                }
@@ -1606,7 +1606,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
-       INIT_LIST_HEAD(&fs_info->delayed_iputs);
        INIT_LIST_HEAD(&fs_info->hashers);
        INIT_LIST_HEAD(&fs_info->delalloc_inodes);
        INIT_LIST_HEAD(&fs_info->ordered_operations);
@@ -1615,7 +1614,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->new_trans_lock);
        spin_lock_init(&fs_info->ref_cache_lock);
        spin_lock_init(&fs_info->fs_roots_radix_lock);
-       spin_lock_init(&fs_info->delayed_iput_lock);
 
        init_completion(&fs_info->kobj_unregister);
        fs_info->tree_root = tree_root;
@@ -1691,7 +1689,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        mutex_init(&fs_info->cleaner_mutex);
        mutex_init(&fs_info->volume_mutex);
        init_rwsem(&fs_info->extent_commit_sem);
-       init_rwsem(&fs_info->cleanup_work_sem);
        init_rwsem(&fs_info->subvol_sem);
 
        btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
@@ -1982,12 +1979,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        if (!(sb->s_flags & MS_RDONLY)) {
                ret = btrfs_recover_relocation(tree_root);
-               if (ret < 0) {
-                       printk(KERN_WARNING
-                              "btrfs: failed to recover relocation\n");
-                       err = -EINVAL;
-                       goto fail_trans_kthread;
-               }
+               BUG_ON(ret);
        }
 
        location.objectid = BTRFS_FS_TREE_OBJECTID;
@@ -1998,12 +1990,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        if (!fs_info->fs_root)
                goto fail_trans_kthread;
 
-       if (!(sb->s_flags & MS_RDONLY)) {
-               down_read(&fs_info->cleanup_work_sem);
-               btrfs_orphan_cleanup(fs_info->fs_root);
-               up_read(&fs_info->cleanup_work_sem);
-       }
-
        return tree_root;
 
 fail_trans_kthread:
@@ -2400,14 +2386,8 @@ int btrfs_commit_super(struct btrfs_root *root)
        int ret;
 
        mutex_lock(&root->fs_info->cleaner_mutex);
-       btrfs_run_delayed_iputs(root);
        btrfs_clean_old_snapshots(root);
        mutex_unlock(&root->fs_info->cleaner_mutex);
-
-       /* wait until ongoing cleanup work done */
-       down_write(&root->fs_info->cleanup_work_sem);
-       up_write(&root->fs_info->cleanup_work_sem);
-
        trans = btrfs_start_transaction(root, 1);
        ret = btrfs_commit_transaction(trans, root);
        BUG_ON(ret);
index 559f72489b3bf02b4477369da854bf371cbbd0e4..94627c4cc193343aacc82b3aca1a6e9eaaefd8dd 100644 (file)
@@ -83,17 +83,6 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
        return (cache->flags & bits) == bits;
 }
 
-void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
-{
-       atomic_inc(&cache->count);
-}
-
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
-{
-       if (atomic_dec_and_test(&cache->count))
-               kfree(cache);
-}
-
 /*
  * this adds the block group to the fs_info rb tree for the block group
  * cache
@@ -167,7 +156,7 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                }
        }
        if (ret)
-               btrfs_get_block_group(ret);
+               atomic_inc(&ret->count);
        spin_unlock(&info->block_group_cache_lock);
 
        return ret;
@@ -206,14 +195,6 @@ static int exclude_super_stripes(struct btrfs_root *root,
        int stripe_len;
        int i, nr, ret;
 
-       if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
-               stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
-               cache->bytes_super += stripe_len;
-               ret = add_excluded_extent(root, cache->key.objectid,
-                                         stripe_len);
-               BUG_ON(ret);
-       }
-
        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
@@ -274,7 +255,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                if (ret)
                        break;
 
-               if (extent_start <= start) {
+               if (extent_start == start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
@@ -418,8 +399,6 @@ err:
 
        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
-       btrfs_put_block_group(block_group);
-
        return 0;
 }
 
@@ -460,7 +439,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
        up_write(&fs_info->extent_commit_sem);
 
        atomic_inc(&cache->space_info->caching_threads);
-       btrfs_get_block_group(cache);
 
        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
                          cache->key.objectid);
@@ -500,6 +478,12 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
        return cache;
 }
 
+void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
+{
+       if (atomic_dec_and_test(&cache->count))
+               kfree(cache);
+}
+
 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
 {
@@ -2590,7 +2574,7 @@ next_block_group(struct btrfs_root *root,
        if (node) {
                cache = rb_entry(node, struct btrfs_block_group_cache,
                                 cache_node);
-               btrfs_get_block_group(cache);
+               atomic_inc(&cache->count);
        } else
                cache = NULL;
        spin_unlock(&root->fs_info->block_group_cache_lock);
@@ -2896,9 +2880,9 @@ static noinline void flush_delalloc_async(struct btrfs_work *work)
        root = async->root;
        info = async->info;
 
-       btrfs_start_delalloc_inodes(root, 0);
+       btrfs_start_delalloc_inodes(root);
        wake_up(&info->flush_wait);
-       btrfs_wait_ordered_extents(root, 0, 0);
+       btrfs_wait_ordered_extents(root, 0);
 
        spin_lock(&info->lock);
        info->flushing = 0;
@@ -2972,8 +2956,8 @@ static void flush_delalloc(struct btrfs_root *root,
        return;
 
 flush:
-       btrfs_start_delalloc_inodes(root, 0);
-       btrfs_wait_ordered_extents(root, 0, 0);
+       btrfs_start_delalloc_inodes(root);
+       btrfs_wait_ordered_extents(root, 0);
 
        spin_lock(&info->lock);
        info->flushing = 0;
@@ -3470,6 +3454,14 @@ static int update_block_group(struct btrfs_trans_handle *trans,
        else
                old_val -= num_bytes;
        btrfs_set_super_bytes_used(&info->super_copy, old_val);
+
+       /* block accounting for root item */
+       old_val = btrfs_root_used(&root->root_item);
+       if (alloc)
+               old_val += num_bytes;
+       else
+               old_val -= num_bytes;
+       btrfs_set_root_used(&root->root_item, old_val);
        spin_unlock(&info->delalloc_lock);
 
        while (total) {
@@ -4057,21 +4049,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
        return ret;
 }
 
-int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
-                         struct btrfs_root *root,
-                         u64 bytenr, u32 blocksize,
-                         u64 parent, u64 root_objectid, int level)
-{
-       u64 used;
-       spin_lock(&root->node_lock);
-       used = btrfs_root_used(&root->root_item) - blocksize;
-       btrfs_set_root_used(&root->root_item, used);
-       spin_unlock(&root->node_lock);
-
-       return btrfs_free_extent(trans, root, bytenr, blocksize,
-                                parent, root_objectid, level, 0);
-}
-
 static u64 stripe_align(struct btrfs_root *root, u64 val)
 {
        u64 mask = ((u64)root->stripesize - 1);
@@ -4235,7 +4212,7 @@ search:
                u64 offset;
                int cached;
 
-               btrfs_get_block_group(block_group);
+               atomic_inc(&block_group->count);
                search_start = block_group->key.objectid;
 
 have_block_group:
@@ -4323,7 +4300,7 @@ have_block_group:
 
                                btrfs_put_block_group(block_group);
                                block_group = last_ptr->block_group;
-                               btrfs_get_block_group(block_group);
+                               atomic_inc(&block_group->count);
                                spin_unlock(&last_ptr->lock);
                                spin_unlock(&last_ptr->refill_lock);
 
@@ -4601,6 +4578,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 {
        int ret;
        u64 search_start = 0;
+       struct btrfs_fs_info *info = root->fs_info;
 
        data = btrfs_get_alloc_profile(root, data);
 again:
@@ -4608,9 +4586,17 @@ again:
         * the only place that sets empty_size is btrfs_realloc_node, which
         * is not called recursively on allocations
         */
-       if (empty_size || root->ref_cows)
+       if (empty_size || root->ref_cows) {
+               if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
+                       ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+                                    2 * 1024 * 1024,
+                                    BTRFS_BLOCK_GROUP_METADATA |
+                                    (info->metadata_alloc_profile &
+                                     info->avail_metadata_alloc_bits), 0);
+               }
                ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                                     num_bytes + 2 * 1024 * 1024, data, 0);
+       }
 
        WARN_ON(num_bytes < root->sectorsize);
        ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -4911,14 +4897,6 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
                                        extent_op);
                BUG_ON(ret);
        }
-
-       if (root_objectid == root->root_key.objectid) {
-               u64 used;
-               spin_lock(&root->node_lock);
-               used = btrfs_root_used(&root->root_item) + num_bytes;
-               btrfs_set_root_used(&root->root_item, used);
-               spin_unlock(&root->node_lock);
-       }
        return ret;
 }
 
@@ -4941,16 +4919,8 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
        btrfs_set_buffer_uptodate(buf);
 
        if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
-               /*
-                * we allow two log transactions at a time, use different
-                * EXENT bit to differentiate dirty pages.
-                */
-               if (root->log_transid % 2 == 0)
-                       set_extent_dirty(&root->dirty_log_pages, buf->start,
-                                       buf->start + buf->len - 1, GFP_NOFS);
-               else
-                       set_extent_new(&root->dirty_log_pages, buf->start,
-                                       buf->start + buf->len - 1, GFP_NOFS);
+               set_extent_dirty(&root->dirty_log_pages, buf->start,
+                        buf->start + buf->len - 1, GFP_NOFS);
        } else {
                set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
                         buf->start + buf->len - 1, GFP_NOFS);
@@ -5402,6 +5372,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
        int ret;
 
        while (level >= 0) {
+               if (path->slots[level] >=
+                   btrfs_header_nritems(path->nodes[level]))
+                       break;
+
                ret = walk_down_proc(trans, root, path, wc, lookup_info);
                if (ret > 0)
                        break;
@@ -5409,10 +5383,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
                if (level == 0)
                        break;
 
-               if (path->slots[level] >=
-                   btrfs_header_nritems(path->nodes[level]))
-                       break;
-
                ret = do_walk_down(trans, root, path, wc, &lookup_info);
                if (ret > 0) {
                        path->slots[level]++;
@@ -7403,7 +7373,9 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                        wait_block_group_cache_done(block_group);
 
                btrfs_remove_free_space_cache(block_group);
-               btrfs_put_block_group(block_group);
+
+               WARN_ON(atomic_read(&block_group->count) != 1);
+               kfree(block_group);
 
                spin_lock(&info->block_group_cache_lock);
        }
index b177ed3196126d9fe8e5d0f1507075a421e535b9..96577e8bf9fdb62819ab2dbd5f9da91200624596 100644 (file)
@@ -3165,9 +3165,10 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                spin_unlock(&tree->buffer_lock);
                goto free_eb;
        }
+       spin_unlock(&tree->buffer_lock);
+
        /* add one reference for the tree */
        atomic_inc(&eb->refs);
-       spin_unlock(&tree->buffer_lock);
        return eb;
 
 free_eb:
index a7fd9f3a750abe05ebd586e6db1dfda729a64aff..06550affbd27ea9181bd08db8d1ea18d9e8f42c0 100644 (file)
@@ -179,14 +179,18 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                }
                flags = em->flags;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
-                       if (testend && em->start + em->len >= start + len) {
+                       if (em->start <= start &&
+                           (!testend || em->start + em->len >= start + len)) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
-                       start = em->start + em->len;
-                       if (testend)
+                       if (start < em->start) {
+                               len = em->start - start;
+                       } else {
                                len = start + len - (em->start + em->len);
+                               start = em->start + em->len;
+                       }
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
@@ -261,253 +265,324 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
  * If an extent intersects the range but is not entirely inside the range
  * it is either truncated or split.  Anything entirely inside the range
  * is deleted from the tree.
+ *
+ * inline_limit is used to tell this code which offsets in the file to keep
+ * if they contain inline extents.
  */
-int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
-                      u64 start, u64 end, u64 *hint_byte, int drop_cache)
+noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+                      struct btrfs_root *root, struct inode *inode,
+                      u64 start, u64 end, u64 locked_end,
+                      u64 inline_limit, u64 *hint_byte, int drop_cache)
 {
-       struct btrfs_root *root = BTRFS_I(inode)->root;
+       u64 extent_end = 0;
+       u64 search_start = start;
+       u64 ram_bytes = 0;
+       u64 disk_bytenr = 0;
+       u64 orig_locked_end = locked_end;
+       u8 compression;
+       u8 encryption;
+       u16 other_encoding = 0;
        struct extent_buffer *leaf;
-       struct btrfs_file_extent_item *fi;
+       struct btrfs_file_extent_item *extent;
        struct btrfs_path *path;
        struct btrfs_key key;
-       struct btrfs_key new_key;
-       u64 search_start = start;
-       u64 disk_bytenr = 0;
-       u64 num_bytes = 0;
-       u64 extent_offset = 0;
-       u64 extent_end = 0;
-       int del_nr = 0;
-       int del_slot = 0;
-       int extent_type;
+       struct btrfs_file_extent_item old;
+       int keep;
+       int slot;
+       int bookend;
+       int found_type = 0;
+       int found_extent;
+       int found_inline;
        int recow;
        int ret;
 
+       inline_limit = 0;
        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
-
        while (1) {
                recow = 0;
+               btrfs_release_path(root, path);
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               search_start, -1);
                if (ret < 0)
-                       break;
-               if (ret > 0 && path->slots[0] > 0 && search_start == start) {
-                       leaf = path->nodes[0];
-                       btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
-                       if (key.objectid == inode->i_ino &&
-                           key.type == BTRFS_EXTENT_DATA_KEY)
-                               path->slots[0]--;
-               }
-               ret = 0;
-next_slot:
-               leaf = path->nodes[0];
-               if (path->slots[0] >= btrfs_header_nritems(leaf)) {
-                       BUG_ON(del_nr > 0);
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret < 0)
-                               break;
-                       if (ret > 0) {
+                       goto out;
+               if (ret > 0) {
+                       if (path->slots[0] == 0) {
                                ret = 0;
-                               break;
+                               goto out;
                        }
-                       leaf = path->nodes[0];
-                       recow = 1;
+                       path->slots[0]--;
                }
-
-               btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-               if (key.objectid > inode->i_ino ||
-                   key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
-                       break;
-
-               fi = btrfs_item_ptr(leaf, path->slots[0],
-                                   struct btrfs_file_extent_item);
-               extent_type = btrfs_file_extent_type(leaf, fi);
-
-               if (extent_type == BTRFS_FILE_EXTENT_REG ||
-                   extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
-                       disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
-                       num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
-                       extent_offset = btrfs_file_extent_offset(leaf, fi);
-                       extent_end = key.offset +
-                               btrfs_file_extent_num_bytes(leaf, fi);
-               } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
-                       extent_end = key.offset +
-                               btrfs_file_extent_inline_len(leaf, fi);
-               } else {
-                       WARN_ON(1);
-                       extent_end = search_start;
+next_slot:
+               keep = 0;
+               bookend = 0;
+               found_extent = 0;
+               found_inline = 0;
+               compression = 0;
+               encryption = 0;
+               extent = NULL;
+               leaf = path->nodes[0];
+               slot = path->slots[0];
+               ret = 0;
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+               if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
+                   key.offset >= end) {
+                       goto out;
                }
-
-               if (extent_end <= search_start) {
-                       path->slots[0]++;
-                       goto next_slot;
+               if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
+                   key.objectid != inode->i_ino) {
+                       goto out;
                }
-
-               search_start = max(key.offset, start);
                if (recow) {
-                       btrfs_release_path(root, path);
+                       search_start = max(key.offset, start);
                        continue;
                }
+               if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
+                       extent = btrfs_item_ptr(leaf, slot,
+                                               struct btrfs_file_extent_item);
+                       found_type = btrfs_file_extent_type(leaf, extent);
+                       compression = btrfs_file_extent_compression(leaf,
+                                                                   extent);
+                       encryption = btrfs_file_extent_encryption(leaf,
+                                                                 extent);
+                       other_encoding = btrfs_file_extent_other_encoding(leaf,
+                                                                 extent);
+                       if (found_type == BTRFS_FILE_EXTENT_REG ||
+                           found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+                               extent_end =
+                                    btrfs_file_extent_disk_bytenr(leaf,
+                                                                  extent);
+                               if (extent_end)
+                                       *hint_byte = extent_end;
+
+                               extent_end = key.offset +
+                                    btrfs_file_extent_num_bytes(leaf, extent);
+                               ram_bytes = btrfs_file_extent_ram_bytes(leaf,
+                                                               extent);
+                               found_extent = 1;
+                       } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+                               found_inline = 1;
+                               extent_end = key.offset +
+                                    btrfs_file_extent_inline_len(leaf, extent);
+                       }
+               } else {
+                       extent_end = search_start;
+               }
 
-               /*
-                *     | - range to drop - |
-                *  | -------- extent -------- |
-                */
-               if (start > key.offset && end < extent_end) {
-                       BUG_ON(del_nr > 0);
-                       BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
-
-                       memcpy(&new_key, &key, sizeof(new_key));
-                       new_key.offset = start;
-                       ret = btrfs_duplicate_item(trans, root, path,
-                                                  &new_key);
-                       if (ret == -EAGAIN) {
-                               btrfs_release_path(root, path);
-                               continue;
+               /* we found nothing we can drop */
+               if ((!found_extent && !found_inline) ||
+                   search_start >= extent_end) {
+                       int nextret;
+                       u32 nritems;
+                       nritems = btrfs_header_nritems(leaf);
+                       if (slot >= nritems - 1) {
+                               nextret = btrfs_next_leaf(root, path);
+                               if (nextret)
+                                       goto out;
+                               recow = 1;
+                       } else {
+                               path->slots[0]++;
                        }
-                       if (ret < 0)
-                               break;
+                       goto next_slot;
+               }
 
-                       leaf = path->nodes[0];
-                       fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
-                                           struct btrfs_file_extent_item);
-                       btrfs_set_file_extent_num_bytes(leaf, fi,
-                                                       start - key.offset);
+               if (end <= extent_end && start >= key.offset && found_inline)
+                       *hint_byte = EXTENT_MAP_INLINE;
 
-                       fi = btrfs_item_ptr(leaf, path->slots[0],
-                                           struct btrfs_file_extent_item);
+               if (found_extent) {
+                       read_extent_buffer(leaf, &old, (unsigned long)extent,
+                                          sizeof(old));
+               }
 
-                       extent_offset += start - key.offset;
-                       btrfs_set_file_extent_offset(leaf, fi, extent_offset);
-                       btrfs_set_file_extent_num_bytes(leaf, fi,
-                                                       extent_end - start);
-                       btrfs_mark_buffer_dirty(leaf);
+               if (end < extent_end && end >= key.offset) {
+                       bookend = 1;
+                       if (found_inline && start <= key.offset)
+                               keep = 1;
+               }
 
-                       if (disk_bytenr > 0) {
+               if (bookend && found_extent) {
+                       if (locked_end < extent_end) {
+                               ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
+                                               locked_end, extent_end - 1,
+                                               GFP_NOFS);
+                               if (!ret) {
+                                       btrfs_release_path(root, path);
+                                       lock_extent(&BTRFS_I(inode)->io_tree,
+                                               locked_end, extent_end - 1,
+                                               GFP_NOFS);
+                                       locked_end = extent_end;
+                                       continue;
+                               }
+                               locked_end = extent_end;
+                       }
+                       disk_bytenr = le64_to_cpu(old.disk_bytenr);
+                       if (disk_bytenr != 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
-                                               disk_bytenr, num_bytes, 0,
-                                               root->root_key.objectid,
-                                               new_key.objectid,
-                                               start - extent_offset);
+                                          disk_bytenr,
+                                          le64_to_cpu(old.disk_num_bytes), 0,
+                                          root->root_key.objectid,
+                                          key.objectid, key.offset -
+                                          le64_to_cpu(old.offset));
                                BUG_ON(ret);
-                               *hint_byte = disk_bytenr;
                        }
-                       key.offset = start;
                }
-               /*
-                *  | ---- range to drop ----- |
-                *      | -------- extent -------- |
-                */
-               if (start <= key.offset && end < extent_end) {
-                       BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
 
-                       memcpy(&new_key, &key, sizeof(new_key));
-                       new_key.offset = end;
-                       btrfs_set_item_key_safe(trans, root, path, &new_key);
-
-                       extent_offset += end - key.offset;
-                       btrfs_set_file_extent_offset(leaf, fi, extent_offset);
-                       btrfs_set_file_extent_num_bytes(leaf, fi,
-                                                       extent_end - end);
-                       btrfs_mark_buffer_dirty(leaf);
-                       if (disk_bytenr > 0) {
-                               inode_sub_bytes(inode, end - key.offset);
-                               *hint_byte = disk_bytenr;
+               if (found_inline) {
+                       u64 mask = root->sectorsize - 1;
+                       search_start = (extent_end + mask) & ~mask;
+               } else
+                       search_start = extent_end;
+
+               /* truncate existing extent */
+               if (start > key.offset) {
+                       u64 new_num;
+                       u64 old_num;
+                       keep = 1;
+                       WARN_ON(start & (root->sectorsize - 1));
+                       if (found_extent) {
+                               new_num = start - key.offset;
+                               old_num = btrfs_file_extent_num_bytes(leaf,
+                                                                     extent);
+                               *hint_byte =
+                                       btrfs_file_extent_disk_bytenr(leaf,
+                                                                     extent);
+                               if (btrfs_file_extent_disk_bytenr(leaf,
+                                                                 extent)) {
+                                       inode_sub_bytes(inode, old_num -
+                                                       new_num);
+                               }
+                               btrfs_set_file_extent_num_bytes(leaf,
+                                                       extent, new_num);
+                               btrfs_mark_buffer_dirty(leaf);
+                       } else if (key.offset < inline_limit &&
+                                  (end > extent_end) &&
+                                  (inline_limit < extent_end)) {
+                               u32 new_size;
+                               new_size = btrfs_file_extent_calc_inline_size(
+                                                  inline_limit - key.offset);
+                               inode_sub_bytes(inode, extent_end -
+                                               inline_limit);
+                               btrfs_set_file_extent_ram_bytes(leaf, extent,
+                                                       new_size);
+                               if (!compression && !encryption) {
+                                       btrfs_truncate_item(trans, root, path,
+                                                           new_size, 1);
+                               }
                        }
-                       break;
                }
+               /* delete the entire extent */
+               if (!keep) {
+                       if (found_inline)
+                               inode_sub_bytes(inode, extent_end -
+                                               key.offset);
+                       ret = btrfs_del_item(trans, root, path);
+                       /* TODO update progress marker and return */
+                       BUG_ON(ret);
+                       extent = NULL;
+                       btrfs_release_path(root, path);
+                       /* the extent will be freed later */
+               }
+               if (bookend && found_inline && start <= key.offset) {
+                       u32 new_size;
+                       new_size = btrfs_file_extent_calc_inline_size(
+                                                  extent_end - end);
+                       inode_sub_bytes(inode, end - key.offset);
+                       btrfs_set_file_extent_ram_bytes(leaf, extent,
+                                                       new_size);
+                       if (!compression && !encryption)
+                               ret = btrfs_truncate_item(trans, root, path,
+                                                         new_size, 0);
+                       BUG_ON(ret);
+               }
+               /* create bookend, splitting the extent in two */
+               if (bookend && found_extent) {
+                       struct btrfs_key ins;
+                       ins.objectid = inode->i_ino;
+                       ins.offset = end;
+                       btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
 
-               search_start = extent_end;
-               /*
-                *       | ---- range to drop ----- |
-                *  | -------- extent -------- |
-                */
-               if (start > key.offset && end >= extent_end) {
-                       BUG_ON(del_nr > 0);
-                       BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
+                       btrfs_release_path(root, path);
+                       path->leave_spinning = 1;
+                       ret = btrfs_insert_empty_item(trans, root, path, &ins,
+                                                     sizeof(*extent));
+                       BUG_ON(ret);
 
-                       btrfs_set_file_extent_num_bytes(leaf, fi,
-                                                       start - key.offset);
-                       btrfs_mark_buffer_dirty(leaf);
-                       if (disk_bytenr > 0) {
-                               inode_sub_bytes(inode, extent_end - start);
-                               *hint_byte = disk_bytenr;
-                       }
-                       if (end == extent_end)
-                               break;
+                       leaf = path->nodes[0];
+                       extent = btrfs_item_ptr(leaf, path->slots[0],
+                                               struct btrfs_file_extent_item);
+                       write_extent_buffer(leaf, &old,
+                                           (unsigned long)extent, sizeof(old));
+
+                       btrfs_set_file_extent_compression(leaf, extent,
+                                                         compression);
+                       btrfs_set_file_extent_encryption(leaf, extent,
+                                                        encryption);
+                       btrfs_set_file_extent_other_encoding(leaf, extent,
+                                                            other_encoding);
+                       btrfs_set_file_extent_offset(leaf, extent,
+                                   le64_to_cpu(old.offset) + end - key.offset);
+                       WARN_ON(le64_to_cpu(old.num_bytes) <
+                               (extent_end - end));
+                       btrfs_set_file_extent_num_bytes(leaf, extent,
+                                                       extent_end - end);
 
-                       path->slots[0]++;
-                       goto next_slot;
+                       /*
+                        * set the ram bytes to the size of the full extent
+                        * before splitting.  This is a worst case flag,
+                        * but its the best we can do because we don't know
+                        * how splitting affects compression
+                        */
+                       btrfs_set_file_extent_ram_bytes(leaf, extent,
+                                                       ram_bytes);
+                       btrfs_set_file_extent_type(leaf, extent, found_type);
+
+                       btrfs_unlock_up_safe(path, 1);
+                       btrfs_mark_buffer_dirty(path->nodes[0]);
+                       btrfs_set_lock_blocking(path->nodes[0]);
+
+                       path->leave_spinning = 0;
+                       btrfs_release_path(root, path);
+                       if (disk_bytenr != 0)
+                               inode_add_bytes(inode, extent_end - end);
                }
 
-               /*
-                *  | ---- range to drop ----- |
-                *    | ------ extent ------ |
-                */
-               if (start <= key.offset && end >= extent_end) {
-                       if (del_nr == 0) {
-                               del_slot = path->slots[0];
-                               del_nr = 1;
-                       } else {
-                               BUG_ON(del_slot + del_nr != path->slots[0]);
-                               del_nr++;
-                       }
+               if (found_extent && !keep) {
+                       u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);
 
-                       if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+                       if (old_disk_bytenr != 0) {
                                inode_sub_bytes(inode,
-                                               extent_end - key.offset);
-                               extent_end = ALIGN(extent_end,
-                                                  root->sectorsize);
-                       } else if (disk_bytenr > 0) {
+                                               le64_to_cpu(old.num_bytes));
                                ret = btrfs_free_extent(trans, root,
-                                               disk_bytenr, num_bytes, 0,
-                                               root->root_key.objectid,
+                                               old_disk_bytenr,
+                                               le64_to_cpu(old.disk_num_bytes),
+                                               0, root->root_key.objectid,
                                                key.objectid, key.offset -
-                                               extent_offset);
+                                               le64_to_cpu(old.offset));
                                BUG_ON(ret);
-                               inode_sub_bytes(inode,
-                                               extent_end - key.offset);
-                               *hint_byte = disk_bytenr;
-                       }
-
-                       if (end == extent_end)
-                               break;
-
-                       if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
-                               path->slots[0]++;
-                               goto next_slot;
+                               *hint_byte = old_disk_bytenr;
                        }
-
-                       ret = btrfs_del_items(trans, root, path, del_slot,
-                                             del_nr);
-                       BUG_ON(ret);
-
-                       del_nr = 0;
-                       del_slot = 0;
-
-                       btrfs_release_path(root, path);
-                       continue;
                }
 
-               BUG_ON(1);
-       }
-
-       if (del_nr > 0) {
-               ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
-               BUG_ON(ret);
+               if (search_start >= end) {
+                       ret = 0;
+                       goto out;
+               }
        }
-
+out:
        btrfs_free_path(path);
+       if (locked_end > orig_locked_end) {
+               unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
+                             locked_end - 1, GFP_NOFS);
+       }
        return ret;
 }
 
 static int extent_mergeable(struct extent_buffer *leaf, int slot,
-                           u64 objectid, u64 bytenr, u64 orig_offset,
-                           u64 *start, u64 *end)
+                           u64 objectid, u64 bytenr, u64 *start, u64 *end)
 {
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
@@ -523,7 +598,6 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
-           btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
@@ -546,24 +620,23 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
  * two or three.
  */
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root,
                              struct inode *inode, u64 start, u64 end)
 {
-       struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
-       struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
-       u64 split;
-       int del_nr = 0;
-       int del_slot = 0;
-       int recow;
+       u64 split = start;
+       u64 locked_end = end;
+       int extent_type;
+       int split_end = 1;
        int ret;
 
        btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -571,11 +644,12 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        BUG_ON(!path);
 again:
-       recow = 0;
-       split = start;
        key.objectid = inode->i_ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
-       key.offset = split;
+       if (split == start)
+               key.offset = split;
+       else
+               key.offset = split - 1;
 
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0 && path->slots[0] > 0)
@@ -587,158 +661,159 @@ again:
               key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
-       BUG_ON(btrfs_file_extent_type(leaf, fi) !=
-              BTRFS_FILE_EXTENT_PREALLOC);
+       extent_type = btrfs_file_extent_type(leaf, fi);
+       BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);
 
        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
-       memcpy(&new_key, &key, sizeof(new_key));
 
-       if (start == key.offset && end < extent_end) {
-               other_start = 0;
-               other_end = start;
-               if (extent_mergeable(leaf, path->slots[0] - 1,
-                                    inode->i_ino, bytenr, orig_offset,
-                                    &other_start, &other_end)) {
-                       new_key.offset = end;
-                       btrfs_set_item_key_safe(trans, root, path, &new_key);
-                       fi = btrfs_item_ptr(leaf, path->slots[0],
-                                           struct btrfs_file_extent_item);
-                       btrfs_set_file_extent_num_bytes(leaf, fi,
-                                                       extent_end - end);
-                       btrfs_set_file_extent_offset(leaf, fi,
-                                                    end - orig_offset);
-                       fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
-                                           struct btrfs_file_extent_item);
-                       btrfs_set_file_extent_num_bytes(leaf, fi,
-                                                       end - other_start);
-                       btrfs_mark_buffer_dirty(leaf);
-                       goto out;
-               }
-       }
+       if (key.offset == start)
+               split = end;
 
-       if (start > key.offset && end == extent_end) {
+       if (key.offset == start && extent_end == end) {
+               int del_nr = 0;
+               int del_slot = 0;
                other_start = end;
                other_end = 0;
-               if (extent_mergeable(leaf, path->slots[0] + 1,
-                                    inode->i_ino, bytenr, orig_offset,
-                                    &other_start, &other_end)) {
-                       fi = btrfs_item_ptr(leaf, path->slots[0],
-                                           struct btrfs_file_extent_item);
-                       btrfs_set_file_extent_num_bytes(leaf, fi,
-                                                       start - key.offset);
-                       path->slots[0]++;
-                       new_key.offset = start;
-                       btrfs_set_item_key_safe(trans, root, path, &new_key);
-
-                       fi = btrfs_item_ptr(leaf, path->slots[0],
-                                           struct btrfs_file_extent_item);
-                       btrfs_set_file_extent_num_bytes(leaf, fi,
-                                                       other_end - start);
-                       btrfs_set_file_extent_offset(leaf, fi,
-                                                    start - orig_offset);
-                       btrfs_mark_buffer_dirty(leaf);
-                       goto out;
+               if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
+                                    bytenr, &other_start, &other_end)) {
+                       extent_end = other_end;
+                       del_slot = path->slots[0] + 1;
+                       del_nr++;
+                       ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+                                               0, root->root_key.objectid,
+                                               inode->i_ino, orig_offset);
+                       BUG_ON(ret);
                }
-       }
-
-       while (start > key.offset || end < extent_end) {
-               if (key.offset == start)
-                       split = end;
-
-               new_key.offset = split;
-               ret = btrfs_duplicate_item(trans, root, path, &new_key);
-               if (ret == -EAGAIN) {
-                       btrfs_release_path(root, path);
-                       goto again;
+               other_start = 0;
+               other_end = start;
+               if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
+                                    bytenr, &other_start, &other_end)) {
+                       key.offset = other_start;
+                       del_slot = path->slots[0];
+                       del_nr++;
+                       ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+                                               0, root->root_key.objectid,
+                                               inode->i_ino, orig_offset);
+                       BUG_ON(ret);
+               }
+               split_end = 0;
+               if (del_nr == 0) {
+                       btrfs_set_file_extent_type(leaf, fi,
+                                                  BTRFS_FILE_EXTENT_REG);
+                       goto done;
                }
-               BUG_ON(ret < 0);
-
-               leaf = path->nodes[0];
-               fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
-                                   struct btrfs_file_extent_item);
-               btrfs_set_file_extent_num_bytes(leaf, fi,
-                                               split - key.offset);
 
-               fi = btrfs_item_ptr(leaf, path->slots[0],
+               fi = btrfs_item_ptr(leaf, del_slot - 1,
                                    struct btrfs_file_extent_item);
-
-               btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
+               btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_num_bytes(leaf, fi,
-                                               extent_end - split);
+                                               extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);
 
-               ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
-                                          root->root_key.objectid,
-                                          inode->i_ino, orig_offset);
+               ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
-
-               if (split == start) {
-                       key.offset = start;
-               } else {
-                       BUG_ON(start != key.offset);
-                       path->slots[0]--;
-                       extent_end = end;
+               goto release;
+       } else if (split == start) {
+               if (locked_end < extent_end) {
+                       ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
+                                       locked_end, extent_end - 1, GFP_NOFS);
+                       if (!ret) {
+                               btrfs_release_path(root, path);
+                               lock_extent(&BTRFS_I(inode)->io_tree,
+                                       locked_end, extent_end - 1, GFP_NOFS);
+                               locked_end = extent_end;
+                               goto again;
+                       }
+                       locked_end = extent_end;
                }
-               recow = 1;
+               btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
+       } else  {
+               BUG_ON(key.offset != start);
+               key.offset = split;
+               btrfs_set_file_extent_offset(leaf, fi, key.offset -
+                                            orig_offset);
+               btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
+               btrfs_set_item_key_safe(trans, root, path, &key);
+               extent_end = split;
        }
 
-       other_start = end;
-       other_end = 0;
-       if (extent_mergeable(leaf, path->slots[0] + 1,
-                            inode->i_ino, bytenr, orig_offset,
-                            &other_start, &other_end)) {
-               if (recow) {
-                       btrfs_release_path(root, path);
-                       goto again;
+       if (extent_end == end) {
+               split_end = 0;
+               extent_type = BTRFS_FILE_EXTENT_REG;
+       }
+       if (extent_end == end && split == start) {
+               other_start = end;
+               other_end = 0;
+               if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
+                                    bytenr, &other_start, &other_end)) {
+                       path->slots[0]++;
+                       fi = btrfs_item_ptr(leaf, path->slots[0],
+                                           struct btrfs_file_extent_item);
+                       key.offset = split;
+                       btrfs_set_item_key_safe(trans, root, path, &key);
+                       btrfs_set_file_extent_offset(leaf, fi, key.offset -
+                                                    orig_offset);
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
+                                                       other_end - split);
+                       goto done;
                }
-               extent_end = other_end;
-               del_slot = path->slots[0] + 1;
-               del_nr++;
-               ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
-                                       0, root->root_key.objectid,
-                                       inode->i_ino, orig_offset);
-               BUG_ON(ret);
        }
-       other_start = 0;
-       other_end = start;
-       if (extent_mergeable(leaf, path->slots[0] - 1,
-                            inode->i_ino, bytenr, orig_offset,
-                            &other_start, &other_end)) {
-               if (recow) {
-                       btrfs_release_path(root, path);
-                       goto again;
+       if (extent_end == end && split == end) {
+               other_start = 0;
+               other_end = start;
+               if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino,
+                                    bytenr, &other_start, &other_end)) {
+                       path->slots[0]--;
+                       fi = btrfs_item_ptr(leaf, path->slots[0],
+                                           struct btrfs_file_extent_item);
+                       btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
+                                                       other_start);
+                       goto done;
                }
-               key.offset = other_start;
-               del_slot = path->slots[0];
-               del_nr++;
-               ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
-                                       0, root->root_key.objectid,
-                                       inode->i_ino, orig_offset);
-               BUG_ON(ret);
        }
-       if (del_nr == 0) {
-               fi = btrfs_item_ptr(leaf, path->slots[0],
-                          struct btrfs_file_extent_item);
-               btrfs_set_file_extent_type(leaf, fi,
-                                          BTRFS_FILE_EXTENT_REG);
-               btrfs_mark_buffer_dirty(leaf);
-       } else {
-               fi = btrfs_item_ptr(leaf, del_slot - 1,
-                          struct btrfs_file_extent_item);
-               btrfs_set_file_extent_type(leaf, fi,
-                                          BTRFS_FILE_EXTENT_REG);
-               btrfs_set_file_extent_num_bytes(leaf, fi,
-                                               extent_end - key.offset);
-               btrfs_mark_buffer_dirty(leaf);
 
-               ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
-               BUG_ON(ret);
+       btrfs_mark_buffer_dirty(leaf);
+
+       ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
+                                  root->root_key.objectid,
+                                  inode->i_ino, orig_offset);
+       BUG_ON(ret);
+       btrfs_release_path(root, path);
+
+       key.offset = start;
+       ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
+       BUG_ON(ret);
+
+       leaf = path->nodes[0];
+       fi = btrfs_item_ptr(leaf, path->slots[0],
+                           struct btrfs_file_extent_item);
+       btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+       btrfs_set_file_extent_type(leaf, fi, extent_type);
+       btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
+       btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
+       btrfs_set_file_extent_offset(leaf, fi, key.offset - orig_offset);
+       btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
+       btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+       btrfs_set_file_extent_compression(leaf, fi, 0);
+       btrfs_set_file_extent_encryption(leaf, fi, 0);
+       btrfs_set_file_extent_other_encoding(leaf, fi, 0);
+done:
+       btrfs_mark_buffer_dirty(leaf);
+
+release:
+       btrfs_release_path(root, path);
+       if (split_end && split == start) {
+               split = end;
+               goto again;
+       }
+       if (locked_end > end) {
+               unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
+                             GFP_NOFS);
        }
-out:
        btrfs_free_path(path);
        return 0;
 }
@@ -1135,7 +1210,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
        }
        mutex_lock(&dentry->d_inode->i_mutex);
 out:
-       return ret > 0 ? -EIO : ret;
+       return ret > 0 ? EIO : ret;
 }
 
 static const struct vm_operations_struct btrfs_file_vm_ops = {
index e03a836d50d08051ea99ae4869e1d8b765ff63f5..b3ad168a0bfc97906dd1fa556b61297250faae58 100644 (file)
@@ -88,14 +88,13 @@ static noinline int cow_file_range(struct inode *inode,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);
 
-static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
-                                    struct inode *inode,  struct inode *dir)
+static int btrfs_init_inode_security(struct inode *inode,  struct inode *dir)
 {
        int err;
 
-       err = btrfs_init_acl(trans, inode, dir);
+       err = btrfs_init_acl(inode, dir);
        if (!err)
-               err = btrfs_xattr_security_init(trans, inode, dir);
+               err = btrfs_xattr_security_init(inode, dir);
        return err;
 }
 
@@ -189,18 +188,8 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);
 
-       /*
-        * we're an inline extent, so nobody can
-        * extend the file past i_size without locking
-        * a page we already have locked.
-        *
-        * We must do any isize and inode updates
-        * before we unlock the pages.  Otherwise we
-        * could end up racing with unlink.
-        */
        BTRFS_I(inode)->disk_i_size = inode->i_size;
        btrfs_update_inode(trans, root, inode);
-
        return 0;
 fail:
        btrfs_free_path(path);
@@ -241,7 +230,8 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
                return 1;
        }
 
-       ret = btrfs_drop_extents(trans, inode, start, aligned_end,
+       ret = btrfs_drop_extents(trans, root, inode, start,
+                                aligned_end, aligned_end, start,
                                 &hint_byte, 1);
        BUG_ON(ret);
 
@@ -426,6 +416,7 @@ again:
                                                    start, end,
                                                    total_compressed, pages);
                }
+               btrfs_end_transaction(trans, root);
                if (ret == 0) {
                        /*
                         * inline extent creation worked, we don't need
@@ -439,11 +430,9 @@ again:
                             EXTENT_CLEAR_DELALLOC |
                             EXTENT_CLEAR_ACCOUNTING |
                             EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
-
-                       btrfs_end_transaction(trans, root);
+                       ret = 0;
                        goto free_pages_out;
                }
-               btrfs_end_transaction(trans, root);
        }
 
        if (will_compress) {
@@ -554,6 +543,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
        if (list_empty(&async_cow->extents))
                return 0;
 
+       trans = btrfs_join_transaction(root, 1);
 
        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
@@ -600,15 +590,19 @@ retry:
                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1,
                            GFP_NOFS);
+               /*
+                * here we're doing allocation and writeback of the
+                * compressed pages
+                */
+               btrfs_drop_extent_cache(inode, async_extent->start,
+                                       async_extent->start +
+                                       async_extent->ram_size - 1, 0);
 
-               trans = btrfs_join_transaction(root, 1);
                ret = btrfs_reserve_extent(trans, root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint,
                                           (u64)-1, &ins, 1);
-               btrfs_end_transaction(trans, root);
-
                if (ret) {
                        int i;
                        for (i = 0; i < async_extent->nr_pages; i++) {
@@ -624,14 +618,6 @@ retry:
                        goto retry;
                }
 
-               /*
-                * here we're doing allocation and writeback of the
-                * compressed pages
-                */
-               btrfs_drop_extent_cache(inode, async_extent->start,
-                                       async_extent->start +
-                                       async_extent->ram_size - 1, 0);
-
                em = alloc_extent_map(GFP_NOFS);
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
@@ -663,6 +649,8 @@ retry:
                                               BTRFS_ORDERED_COMPRESSED);
                BUG_ON(ret);
 
+               btrfs_end_transaction(trans, root);
+
                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
@@ -684,11 +672,13 @@ retry:
                                    async_extent->nr_pages);
 
                BUG_ON(ret);
+               trans = btrfs_join_transaction(root, 1);
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }
 
+       btrfs_end_transaction(trans, root);
        return 0;
 }
 
@@ -752,7 +742,6 @@ static noinline int cow_file_range(struct inode *inode,
                                     EXTENT_CLEAR_DIRTY |
                                     EXTENT_SET_WRITEBACK |
                                     EXTENT_END_WRITEBACK);
-
                        *nr_written = *nr_written +
                             (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                        *page_started = 1;
@@ -1607,6 +1596,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
                                       struct inode *inode, u64 file_pos,
                                       u64 disk_bytenr, u64 disk_num_bytes,
                                       u64 num_bytes, u64 ram_bytes,
+                                      u64 locked_end,
                                       u8 compression, u8 encryption,
                                       u16 other_encoding, int extent_type)
 {
@@ -1632,8 +1622,9 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
         * the caller is expected to unpin it and allow it to be merged
         * with the others.
         */
-       ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
-                                &hint, 0);
+       ret = btrfs_drop_extents(trans, root, inode, file_pos,
+                                file_pos + num_bytes, locked_end,
+                                file_pos, &hint, 0);
        BUG_ON(ret);
 
        ins.objectid = inode->i_ino;
@@ -1680,6 +1671,24 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
  * before we start the transaction.  It limits the amount of btree
  * reads required while inside the transaction.
  */
+static noinline void reada_csum(struct btrfs_root *root,
+                               struct btrfs_path *path,
+                               struct btrfs_ordered_extent *ordered_extent)
+{
+       struct btrfs_ordered_sum *sum;
+       u64 bytenr;
+
+       sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
+                        list);
+       bytenr = sum->sums[0].bytenr;
+
+       /*
+        * we don't care about the results, the point of this search is
+        * just to get the btree leaves into ram
+        */
+       btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
+}
+
 /* as ordered data IO finishes, this gets called so we can finish
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
@@ -1690,6 +1699,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        struct btrfs_trans_handle *trans;
        struct btrfs_ordered_extent *ordered_extent = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+       struct btrfs_path *path;
        int compressed = 0;
        int ret;
 
@@ -1697,32 +1707,46 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        if (!ret)
                return 0;
 
-       ordered_extent = btrfs_lookup_ordered_extent(inode, start);
-       BUG_ON(!ordered_extent);
-
-       if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
-               BUG_ON(!list_empty(&ordered_extent->list));
-               ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
-               if (!ret) {
-                       trans = btrfs_join_transaction(root, 1);
-                       ret = btrfs_update_inode(trans, root, inode);
-                       BUG_ON(ret);
-                       btrfs_end_transaction(trans, root);
+       /*
+        * before we join the transaction, try to do some of our IO.
+        * This will limit the amount of IO that we have to do with
+        * the transaction running.  We're unlikely to need to do any
+        * IO if the file extents are new, the disk_i_size checks
+        * covers the most common case.
+        */
+       if (start < BTRFS_I(inode)->disk_i_size) {
+               path = btrfs_alloc_path();
+               if (path) {
+                       ret = btrfs_lookup_file_extent(NULL, root, path,
+                                                      inode->i_ino,
+                                                      start, 0);
+                       ordered_extent = btrfs_lookup_ordered_extent(inode,
+                                                                    start);
+                       if (!list_empty(&ordered_extent->list)) {
+                               btrfs_release_path(root, path);
+                               reada_csum(root, path, ordered_extent);
+                       }
+                       btrfs_free_path(path);
                }
-               goto out;
        }
 
+       trans = btrfs_join_transaction(root, 1);
+
+       if (!ordered_extent)
+               ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+       BUG_ON(!ordered_extent);
+       if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
+               goto nocow;
+
        lock_extent(io_tree, ordered_extent->file_offset,
                    ordered_extent->file_offset + ordered_extent->len - 1,
                    GFP_NOFS);
 
-       trans = btrfs_join_transaction(root, 1);
-
        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
                compressed = 1;
        if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
                BUG_ON(compressed);
-               ret = btrfs_mark_extent_written(trans, inode,
+               ret = btrfs_mark_extent_written(trans, root, inode,
                                                ordered_extent->file_offset,
                                                ordered_extent->file_offset +
                                                ordered_extent->len);
@@ -1734,6 +1758,8 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                                                ordered_extent->disk_len,
                                                ordered_extent->len,
                                                ordered_extent->len,
+                                               ordered_extent->file_offset +
+                                               ordered_extent->len,
                                                compressed, 0, 0,
                                                BTRFS_FILE_EXTENT_REG);
                unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
@@ -1744,20 +1770,22 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        unlock_extent(io_tree, ordered_extent->file_offset,
                    ordered_extent->file_offset + ordered_extent->len - 1,
                    GFP_NOFS);
+nocow:
        add_pending_csums(trans, inode, ordered_extent->file_offset,
                          &ordered_extent->list);
 
-       /* this also removes the ordered extent from the tree */
-       btrfs_ordered_update_i_size(inode, 0, ordered_extent);
-       ret = btrfs_update_inode(trans, root, inode);
-       BUG_ON(ret);
-       btrfs_end_transaction(trans, root);
-out:
+       mutex_lock(&BTRFS_I(inode)->extent_mutex);
+       btrfs_ordered_update_i_size(inode, ordered_extent);
+       btrfs_update_inode(trans, root, inode);
+       btrfs_remove_ordered_extent(inode, ordered_extent);
+       mutex_unlock(&BTRFS_I(inode)->extent_mutex);
+
        /* once for us */
        btrfs_put_ordered_extent(ordered_extent);
        /* once for the tree */
        btrfs_put_ordered_extent(ordered_extent);
 
+       btrfs_end_transaction(trans, root);
        return 0;
 }
 
@@ -1980,54 +2008,6 @@ zeroit:
        return -EIO;
 }
 
-struct delayed_iput {
-       struct list_head list;
-       struct inode *inode;
-};
-
-void btrfs_add_delayed_iput(struct inode *inode)
-{
-       struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
-       struct delayed_iput *delayed;
-
-       if (atomic_add_unless(&inode->i_count, -1, 1))
-               return;
-
-       delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
-       delayed->inode = inode;
-
-       spin_lock(&fs_info->delayed_iput_lock);
-       list_add_tail(&delayed->list, &fs_info->delayed_iputs);
-       spin_unlock(&fs_info->delayed_iput_lock);
-}
-
-void btrfs_run_delayed_iputs(struct btrfs_root *root)
-{
-       LIST_HEAD(list);
-       struct btrfs_fs_info *fs_info = root->fs_info;
-       struct delayed_iput *delayed;
-       int empty;
-
-       spin_lock(&fs_info->delayed_iput_lock);
-       empty = list_empty(&fs_info->delayed_iputs);
-       spin_unlock(&fs_info->delayed_iput_lock);
-       if (empty)
-               return;
-
-       down_read(&root->fs_info->cleanup_work_sem);
-       spin_lock(&fs_info->delayed_iput_lock);
-       list_splice_init(&fs_info->delayed_iputs, &list);
-       spin_unlock(&fs_info->delayed_iput_lock);
-
-       while (!list_empty(&list)) {
-               delayed = list_entry(list.next, struct delayed_iput, list);
-               list_del(&delayed->list);
-               iput(delayed->inode);
-               kfree(delayed);
-       }
-       up_read(&root->fs_info->cleanup_work_sem);
-}
-
 /*
  * This creates an orphan entry for the given inode in case something goes
  * wrong in the middle of an unlink/truncate.
@@ -2100,17 +2080,16 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
        struct inode *inode;
        int ret = 0, nr_unlink = 0, nr_truncate = 0;
 
-       if (!xchg(&root->clean_orphans, 0))
-               return;
-
        path = btrfs_alloc_path();
-       BUG_ON(!path);
+       if (!path)
+               return;
        path->reada = -1;
 
        key.objectid = BTRFS_ORPHAN_OBJECTID;
        btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
        key.offset = (u64)-1;
 
+
        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0) {
@@ -2855,40 +2834,37 @@ out:
  * min_type is the minimum key type to truncate down to.  If set to 0, this
  * will kill all the items on this inode, including the INODE_ITEM_KEY.
  */
-int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
-                              struct btrfs_root *root,
-                              struct inode *inode,
-                              u64 new_size, u32 min_type)
+noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+                                       struct btrfs_root *root,
+                                       struct inode *inode,
+                                       u64 new_size, u32 min_type)
 {
+       int ret;
        struct btrfs_path *path;
-       struct extent_buffer *leaf;
-       struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key found_key;
+       u32 found_type = (u8)-1;
+       struct extent_buffer *leaf;
+       struct btrfs_file_extent_item *fi;
        u64 extent_start = 0;
        u64 extent_num_bytes = 0;
        u64 extent_offset = 0;
        u64 item_end = 0;
-       u64 mask = root->sectorsize - 1;
-       u32 found_type = (u8)-1;
        int found_extent;
        int del_item;
        int pending_del_nr = 0;
        int pending_del_slot = 0;
        int extent_type = -1;
        int encoding;
-       int ret;
-       int err = 0;
-
-       BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
+       u64 mask = root->sectorsize - 1;
 
        if (root->ref_cows)
                btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
-
        path = btrfs_alloc_path();
        BUG_ON(!path);
        path->reada = -1;
 
+       /* FIXME, add redo link to tree so we don't leak on crash */
        key.objectid = inode->i_ino;
        key.offset = (u64)-1;
        key.type = (u8)-1;
@@ -2896,17 +2872,17 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 search_again:
        path->leave_spinning = 1;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
-       if (ret < 0) {
-               err = ret;
-               goto out;
-       }
+       if (ret < 0)
+               goto error;
 
        if (ret > 0) {
                /* there are no items in the tree for us to truncate, we're
                 * done
                 */
-               if (path->slots[0] == 0)
-                       goto out;
+               if (path->slots[0] == 0) {
+                       ret = 0;
+                       goto error;
+               }
                path->slots[0]--;
        }
 
@@ -2941,17 +2917,28 @@ search_again:
                        }
                        item_end--;
                }
-               if (found_type > min_type) {
-                       del_item = 1;
-               } else {
-                       if (item_end < new_size)
-                               break;
-                       if (found_key.offset >= new_size)
-                               del_item = 1;
+               if (item_end < new_size) {
+                       if (found_type == BTRFS_DIR_ITEM_KEY)
+                               found_type = BTRFS_INODE_ITEM_KEY;
+                       else if (found_type == BTRFS_EXTENT_ITEM_KEY)
+                               found_type = BTRFS_EXTENT_DATA_KEY;
+                       else if (found_type == BTRFS_EXTENT_DATA_KEY)
+                               found_type = BTRFS_XATTR_ITEM_KEY;
+                       else if (found_type == BTRFS_XATTR_ITEM_KEY)
+                               found_type = BTRFS_INODE_REF_KEY;
+                       else if (found_type)
+                               found_type--;
                        else
-                               del_item = 0;
+                               break;
+                       btrfs_set_key_type(&key, found_type);
+                       goto next;
                }
+               if (found_key.offset >= new_size)
+                       del_item = 1;
+               else
+                       del_item = 0;
                found_extent = 0;
+
                /* FIXME, shrink the extent if the ref count is only 1 */
                if (found_type != BTRFS_EXTENT_DATA_KEY)
                        goto delete;
@@ -3038,36 +3025,42 @@ delete:
                                                inode->i_ino, extent_offset);
                        BUG_ON(ret);
                }
+next:
+               if (path->slots[0] == 0) {
+                       if (pending_del_nr)
+                               goto del_pending;
+                       btrfs_release_path(root, path);
+                       if (found_type == BTRFS_INODE_ITEM_KEY)
+                               break;
+                       goto search_again;
+               }
 
-               if (found_type == BTRFS_INODE_ITEM_KEY)
-                       break;
-
-               if (path->slots[0] == 0 ||
-                   path->slots[0] != pending_del_slot) {
-                       if (root->ref_cows) {
-                               err = -EAGAIN;
-                               goto out;
-                       }
-                       if (pending_del_nr) {
-                               ret = btrfs_del_items(trans, root, path,
-                                               pending_del_slot,
-                                               pending_del_nr);
-                               BUG_ON(ret);
-                               pending_del_nr = 0;
-                       }
+               path->slots[0]--;
+               if (pending_del_nr &&
+                   path->slots[0] + 1 != pending_del_slot) {
+                       struct btrfs_key debug;
+del_pending:
+                       btrfs_item_key_to_cpu(path->nodes[0], &debug,
+                                             pending_del_slot);
+                       ret = btrfs_del_items(trans, root, path,
+                                             pending_del_slot,
+                                             pending_del_nr);
+                       BUG_ON(ret);
+                       pending_del_nr = 0;
                        btrfs_release_path(root, path);
+                       if (found_type == BTRFS_INODE_ITEM_KEY)
+                               break;
                        goto search_again;
-               } else {
-                       path->slots[0]--;
                }
        }
-out:
+       ret = 0;
+error:
        if (pending_del_nr) {
                ret = btrfs_del_items(trans, root, path, pending_del_slot,
                                      pending_del_nr);
        }
        btrfs_free_path(path);
-       return err;
+       return ret;
 }
 
 /*
@@ -3187,6 +3180,10 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
        if (size <= hole_start)
                return 0;
 
+       err = btrfs_truncate_page(inode->i_mapping, inode->i_size);
+       if (err)
+               return err;
+
        while (1) {
                struct btrfs_ordered_extent *ordered;
                btrfs_wait_ordered_range(inode, hole_start,
@@ -3199,6 +3196,9 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
                btrfs_put_ordered_extent(ordered);
        }
 
+       trans = btrfs_start_transaction(root, 1);
+       btrfs_set_trans_block_group(trans, inode);
+
        cur_offset = hole_start;
        while (1) {
                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
@@ -3206,120 +3206,40 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
                BUG_ON(IS_ERR(em) || !em);
                last_byte = min(extent_map_end(em), block_end);
                last_byte = (last_byte + mask) & ~mask;
-               if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+               if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
                        u64 hint_byte = 0;
                        hole_size = last_byte - cur_offset;
-
-                       err = btrfs_reserve_metadata_space(root, 2);
+                       err = btrfs_drop_extents(trans, root, inode,
+                                                cur_offset,
+                                                cur_offset + hole_size,
+                                                block_end,
+                                                cur_offset, &hint_byte, 1);
                        if (err)
                                break;
 
-                       trans = btrfs_start_transaction(root, 1);
-                       btrfs_set_trans_block_group(trans, inode);
-
-                       err = btrfs_drop_extents(trans, inode, cur_offset,
-                                                cur_offset + hole_size,
-                                                &hint_byte, 1);
-                       BUG_ON(err);
+                       err = btrfs_reserve_metadata_space(root, 1);
+                       if (err)
+                               break;
 
                        err = btrfs_insert_file_extent(trans, root,
                                        inode->i_ino, cur_offset, 0,
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
-                       BUG_ON(err);
-
                        btrfs_drop_extent_cache(inode, hole_start,
                                        last_byte - 1, 0);
-
-                       btrfs_end_transaction(trans, root);
-                       btrfs_unreserve_metadata_space(root, 2);
+                       btrfs_unreserve_metadata_space(root, 1);
                }
                free_extent_map(em);
                cur_offset = last_byte;
-               if (cur_offset >= block_end)
+               if (err || cur_offset >= block_end)
                        break;
        }
 
+       btrfs_end_transaction(trans, root);
        unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
        return err;
 }
 
-static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
-{
-       struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct btrfs_trans_handle *trans;
-       unsigned long nr;
-       int ret;
-
-       if (attr->ia_size == inode->i_size)
-               return 0;
-
-       if (attr->ia_size > inode->i_size) {
-               unsigned long limit;
-               limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-               if (attr->ia_size > inode->i_sb->s_maxbytes)
-                       return -EFBIG;
-               if (limit != RLIM_INFINITY && attr->ia_size > limit) {
-                       send_sig(SIGXFSZ, current, 0);
-                       return -EFBIG;
-               }
-       }
-
-       ret = btrfs_reserve_metadata_space(root, 1);
-       if (ret)
-               return ret;
-
-       trans = btrfs_start_transaction(root, 1);
-       btrfs_set_trans_block_group(trans, inode);
-
-       ret = btrfs_orphan_add(trans, inode);
-       BUG_ON(ret);
-
-       nr = trans->blocks_used;
-       btrfs_end_transaction(trans, root);
-       btrfs_unreserve_metadata_space(root, 1);
-       btrfs_btree_balance_dirty(root, nr);
-
-       if (attr->ia_size > inode->i_size) {
-               ret = btrfs_cont_expand(inode, attr->ia_size);
-               if (ret) {
-                       btrfs_truncate(inode);
-                       return ret;
-               }
-
-               i_size_write(inode, attr->ia_size);
-               btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
-
-               trans = btrfs_start_transaction(root, 1);
-               btrfs_set_trans_block_group(trans, inode);
-
-               ret = btrfs_update_inode(trans, root, inode);
-               BUG_ON(ret);
-               if (inode->i_nlink > 0) {
-                       ret = btrfs_orphan_del(trans, inode);
-                       BUG_ON(ret);
-               }
-               nr = trans->blocks_used;
-               btrfs_end_transaction(trans, root);
-               btrfs_btree_balance_dirty(root, nr);
-               return 0;
-       }
-
-       /*
-        * We're truncating a file that used to have good data down to
-        * zero. Make sure it gets into the ordered flush list so that
-        * any new writes get down to disk quickly.
-        */
-       if (attr->ia_size == 0)
-               BTRFS_I(inode)->ordered_data_close = 1;
-
-       /* we don't support swapfiles, so vmtruncate shouldn't fail */
-       ret = vmtruncate(inode, attr->ia_size);
-       BUG_ON(ret);
-
-       return 0;
-}
-
 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
 {
        struct inode *inode = dentry->d_inode;
@@ -3330,14 +3250,23 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
                return err;
 
        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
-               err = btrfs_setattr_size(inode, attr);
-               if (err)
-                       return err;
+               if (attr->ia_size > inode->i_size) {
+                       err = btrfs_cont_expand(inode, attr->ia_size);
+                       if (err)
+                               return err;
+               } else if (inode->i_size > 0 &&
+                          attr->ia_size == 0) {
+
+                       /* we're truncating a file that used to have good
+                        * data down to zero.  Make sure it gets into
+                        * the ordered flush list so that any new writes
+                        * get down to disk quickly.
+                        */
+                       BTRFS_I(inode)->ordered_data_close = 1;
+               }
        }
-       attr->ia_valid &= ~ATTR_SIZE;
 
-       if (attr->ia_valid)
-               err = inode_setattr(inode, attr);
+       err = inode_setattr(inode, attr);
 
        if (!err && ((attr->ia_valid & ATTR_MODE)))
                err = btrfs_acl_chmod(inode);
@@ -3358,43 +3287,36 @@ void btrfs_delete_inode(struct inode *inode)
        }
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
-       if (root->fs_info->log_root_recovering) {
-               BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
-               goto no_delete;
-       }
-
        if (inode->i_nlink > 0) {
                BUG_ON(btrfs_root_refs(&root->root_item) != 0);
                goto no_delete;
        }
 
        btrfs_i_size_write(inode, 0);
+       trans = btrfs_join_transaction(root, 1);
 
-       while (1) {
-               trans = btrfs_start_transaction(root, 1);
-               btrfs_set_trans_block_group(trans, inode);
-               ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
+       btrfs_set_trans_block_group(trans, inode);
+       ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
+       if (ret) {
+               btrfs_orphan_del(NULL, inode);
+               goto no_delete_lock;
+       }
 
-               if (ret != -EAGAIN)
-                       break;
+       btrfs_orphan_del(trans, inode);
 
-               nr = trans->blocks_used;
-               btrfs_end_transaction(trans, root);
-               trans = NULL;
-               btrfs_btree_balance_dirty(root, nr);
-       }
+       nr = trans->blocks_used;
+       clear_inode(inode);
 
-       if (ret == 0) {
-               ret = btrfs_orphan_del(trans, inode);
-               BUG_ON(ret);
-       }
+       btrfs_end_transaction(trans, root);
+       btrfs_btree_balance_dirty(root, nr);
+       return;
 
+no_delete_lock:
        nr = trans->blocks_used;
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root, nr);
 no_delete:
        clear_inode(inode);
-       return;
 }
 
 /*
@@ -3647,6 +3569,7 @@ static noinline void init_btrfs_i(struct inode *inode)
        INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
        RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
        btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
+       mutex_init(&BTRFS_I(inode)->extent_mutex);
        mutex_init(&BTRFS_I(inode)->log_mutex);
 }
 
@@ -3772,13 +3695,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
        }
        srcu_read_unlock(&root->fs_info->subvol_srcu, index);
 
-       if (root != sub_root) {
-               down_read(&root->fs_info->cleanup_work_sem);
-               if (!(inode->i_sb->s_flags & MS_RDONLY))
-                       btrfs_orphan_cleanup(sub_root);
-               up_read(&root->fs_info->cleanup_work_sem);
-       }
-
        return inode;
 }
 
@@ -3953,11 +3869,7 @@ skip:
 
        /* Reached end of directory/root. Bump pos past the last item. */
        if (key_type == BTRFS_DIR_INDEX_KEY)
-               /*
-                * 32-bit glibc will use getdents64, but then strtol -
-                * so the last number we can serve is this.
-                */
-               filp->f_pos = 0x7fffffff;
+               filp->f_pos = INT_LIMIT(off_t);
        else
                filp->f_pos++;
 nopos:
@@ -4307,7 +4219,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        if (IS_ERR(inode))
                goto out_unlock;
 
-       err = btrfs_init_inode_security(trans, inode, dir);
+       err = btrfs_init_inode_security(inode, dir);
        if (err) {
                drop_inode = 1;
                goto out_unlock;
@@ -4378,7 +4290,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        if (IS_ERR(inode))
                goto out_unlock;
 
-       err = btrfs_init_inode_security(trans, inode, dir);
+       err = btrfs_init_inode_security(inode, dir);
        if (err) {
                drop_inode = 1;
                goto out_unlock;
@@ -4424,10 +4336,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        if (inode->i_nlink == 0)
                return -ENOENT;
 
-       /* do not allow sys_link's with other subvols of the same device */
-       if (root->objectid != BTRFS_I(inode)->root->objectid)
-               return -EPERM;
-
        /*
         * 1 item for inode ref
         * 2 items for dir items
@@ -4515,7 +4423,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 
        drop_on_err = 1;
 
-       err = btrfs_init_inode_security(trans, inode, dir);
+       err = btrfs_init_inode_security(inode, dir);
        if (err)
                goto out_fail;
 
@@ -5166,20 +5074,17 @@ static void btrfs_truncate(struct inode *inode)
        unsigned long nr;
        u64 mask = root->sectorsize - 1;
 
-       if (!S_ISREG(inode->i_mode)) {
-               WARN_ON(1);
+       if (!S_ISREG(inode->i_mode))
+               return;
+       if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;
-       }
 
        ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
        if (ret)
                return;
-
        btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
-       btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
 
        trans = btrfs_start_transaction(root, 1);
-       btrfs_set_trans_block_group(trans, inode);
 
        /*
         * setattr is responsible for setting the ordered_data_close flag,
@@ -5201,32 +5106,21 @@ static void btrfs_truncate(struct inode *inode)
        if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
                btrfs_add_ordered_operation(trans, root, inode);
 
-       while (1) {
-               ret = btrfs_truncate_inode_items(trans, root, inode,
-                                                inode->i_size,
-                                                BTRFS_EXTENT_DATA_KEY);
-               if (ret != -EAGAIN)
-                       break;
-
-               ret = btrfs_update_inode(trans, root, inode);
-               BUG_ON(ret);
-
-               nr = trans->blocks_used;
-               btrfs_end_transaction(trans, root);
-               btrfs_btree_balance_dirty(root, nr);
-
-               trans = btrfs_start_transaction(root, 1);
-               btrfs_set_trans_block_group(trans, inode);
-       }
+       btrfs_set_trans_block_group(trans, inode);
+       btrfs_i_size_write(inode, inode->i_size);
 
-       if (ret == 0 && inode->i_nlink > 0) {
-               ret = btrfs_orphan_del(trans, inode);
-               BUG_ON(ret);
-       }
+       ret = btrfs_orphan_add(trans, inode);
+       if (ret)
+               goto out;
+       /* FIXME, add redo link to tree so we don't leak on crash */
+       ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
+                                     BTRFS_EXTENT_DATA_KEY);
+       btrfs_update_inode(trans, root, inode);
 
-       ret = btrfs_update_inode(trans, root, inode);
+       ret = btrfs_orphan_del(trans, inode);
        BUG_ON(ret);
 
+out:
        nr = trans->blocks_used;
        ret = btrfs_end_transaction_throttle(trans, root);
        BUG_ON(ret);
@@ -5323,9 +5217,9 @@ void btrfs_destroy_inode(struct inode *inode)
 
        spin_lock(&root->list_lock);
        if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
-               printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
-                      inode->i_ino);
-               list_del_init(&BTRFS_I(inode)->i_orphan);
+               printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
+                      " list\n", inode->i_ino);
+               dump_stack();
        }
        spin_unlock(&root->list_lock);
 
@@ -5582,7 +5476,7 @@ out_fail:
  * some fairly slow code that needs optimization. This walks the list
  * of all the inodes with pending delalloc and forces them to disk.
  */
-int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
+int btrfs_start_delalloc_inodes(struct btrfs_root *root)
 {
        struct list_head *head = &root->fs_info->delalloc_inodes;
        struct btrfs_inode *binode;
@@ -5601,10 +5495,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
                spin_unlock(&root->fs_info->delalloc_lock);
                if (inode) {
                        filemap_flush(inode->i_mapping);
-                       if (delay_iput)
-                               btrfs_add_delayed_iput(inode);
-                       else
-                               iput(inode);
+                       iput(inode);
                }
                cond_resched();
                spin_lock(&root->fs_info->delalloc_lock);
@@ -5678,7 +5569,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        if (IS_ERR(inode))
                goto out_unlock;
 
-       err = btrfs_init_inode_security(trans, inode, dir);
+       err = btrfs_init_inode_security(inode, dir);
        if (err) {
                drop_inode = 1;
                goto out_unlock;
@@ -5750,77 +5641,57 @@ out_fail:
        return err;
 }
 
-static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
-                       u64 alloc_hint, int mode, loff_t actual_len)
+static int prealloc_file_range(struct btrfs_trans_handle *trans,
+                              struct inode *inode, u64 start, u64 end,
+                              u64 locked_end, u64 alloc_hint, int mode)
 {
-       struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_key ins;
        u64 alloc_size;
        u64 cur_offset = start;
        u64 num_bytes = end - start;
        int ret = 0;
-       u64 i_size;
 
        while (num_bytes > 0) {
                alloc_size = min(num_bytes, root->fs_info->max_extent);
 
-               trans = btrfs_start_transaction(root, 1);
+               ret = btrfs_reserve_metadata_space(root, 1);
+               if (ret)
+                       goto out;
 
                ret = btrfs_reserve_extent(trans, root, alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           (u64)-1, &ins, 1);
                if (ret) {
                        WARN_ON(1);
-                       goto stop_trans;
-               }
-
-               ret = btrfs_reserve_metadata_space(root, 3);
-               if (ret) {
-                       btrfs_free_reserved_extent(root, ins.objectid,
-                                                  ins.offset);
-                       goto stop_trans;
+                       goto out;
                }
-
                ret = insert_reserved_file_extent(trans, inode,
                                                  cur_offset, ins.objectid,
                                                  ins.offset, ins.offset,
-                                                 ins.offset, 0, 0, 0,
+                                                 ins.offset, locked_end,
+                                                 0, 0, 0,
                                                  BTRFS_FILE_EXTENT_PREALLOC);
                BUG_ON(ret);
                btrfs_drop_extent_cache(inode, cur_offset,
                                        cur_offset + ins.offset -1, 0);
-
                num_bytes -= ins.offset;
                cur_offset += ins.offset;
                alloc_hint = ins.objectid + ins.offset;
-
+               btrfs_unreserve_metadata_space(root, 1);
+       }
+out:
+       if (cur_offset > start) {
                inode->i_ctime = CURRENT_TIME;
                BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
                if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-                       (actual_len > inode->i_size) &&
-                       (cur_offset > inode->i_size)) {
-
-                       if (cur_offset > actual_len)
-                               i_size  = actual_len;
-                       else
-                               i_size = cur_offset;
-                       i_size_write(inode, i_size);
-                       btrfs_ordered_update_i_size(inode, i_size, NULL);
-               }
-
+                   cur_offset > i_size_read(inode))
+                       btrfs_i_size_write(inode, cur_offset);
                ret = btrfs_update_inode(trans, root, inode);
                BUG_ON(ret);
-
-               btrfs_end_transaction(trans, root);
-               btrfs_unreserve_metadata_space(root, 3);
        }
-       return ret;
 
-stop_trans:
-       btrfs_end_transaction(trans, root);
        return ret;
-
 }
 
 static long btrfs_fallocate(struct inode *inode, int mode,
@@ -5834,6 +5705,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
        u64 locked_end;
        u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
        struct extent_map *em;
+       struct btrfs_trans_handle *trans;
+       struct btrfs_root *root;
        int ret;
 
        alloc_start = offset & ~mask;
@@ -5852,7 +5725,9 @@ static long btrfs_fallocate(struct inode *inode, int mode,
                        goto out;
        }
 
-       ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode,
+       root = BTRFS_I(inode)->root;
+
+       ret = btrfs_check_data_free_space(root, inode,
                                          alloc_end - alloc_start);
        if (ret)
                goto out;
@@ -5861,6 +5736,12 @@ static long btrfs_fallocate(struct inode *inode, int mode,
        while (1) {
                struct btrfs_ordered_extent *ordered;
 
+               trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
+               if (!trans) {
+                       ret = -EIO;
+                       goto out_free;
+               }
+
                /* the extent lock is ordered inside the running
                 * transaction
                 */
@@ -5874,6 +5755,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent(&BTRFS_I(inode)->io_tree,
                                      alloc_start, locked_end, GFP_NOFS);
+                       btrfs_end_transaction(trans, BTRFS_I(inode)->root);
+
                        /*
                         * we can't wait on the range with the transaction
                         * running or with the extent lock held
@@ -5894,12 +5777,10 @@ static long btrfs_fallocate(struct inode *inode, int mode,
                BUG_ON(IS_ERR(em) || !em);
                last_byte = min(extent_map_end(em), alloc_end);
                last_byte = (last_byte + mask) & ~mask;
-               if (em->block_start == EXTENT_MAP_HOLE ||
-                   (cur_offset >= inode->i_size &&
-                    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
-                       ret = prealloc_file_range(inode,
-                                                 cur_offset, last_byte,
-                                               alloc_hint, mode, offset+len);
+               if (em->block_start == EXTENT_MAP_HOLE) {
+                       ret = prealloc_file_range(trans, inode, cur_offset,
+                                       last_byte, locked_end + 1,
+                                       alloc_hint, mode);
                        if (ret < 0) {
                                free_extent_map(em);
                                break;
@@ -5918,8 +5799,9 @@ static long btrfs_fallocate(struct inode *inode, int mode,
        unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
                      GFP_NOFS);
 
-       btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
-                                      alloc_end - alloc_start);
+       btrfs_end_transaction(trans, BTRFS_I(inode)->root);
+out_free:
+       btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
 out:
        mutex_unlock(&inode->i_mutex);
        return ret;
index 0bc5776950612f7b4b8e6e0d7794d97b2c7f294b..cdbb054102b9f860ee4d119dfff39e891ac29438 100644 (file)
@@ -237,6 +237,7 @@ static noinline int create_subvol(struct btrfs_root *root,
        u64 objectid;
        u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
        u64 index = 0;
+       unsigned long nr = 1;
 
        /*
         * 1 - inode item
@@ -289,7 +290,7 @@ static noinline int create_subvol(struct btrfs_root *root,
        btrfs_set_root_generation(&root_item, trans->transid);
        btrfs_set_root_level(&root_item, 0);
        btrfs_set_root_refs(&root_item, 1);
-       btrfs_set_root_used(&root_item, leaf->len);
+       btrfs_set_root_used(&root_item, 0);
        btrfs_set_root_last_snapshot(&root_item, 0);
 
        memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
@@ -341,21 +342,24 @@ static noinline int create_subvol(struct btrfs_root *root,
 
        d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
 fail:
+       nr = trans->blocks_used;
        err = btrfs_commit_transaction(trans, root);
        if (err && !ret)
                ret = err;
 
        btrfs_unreserve_metadata_space(root, 6);
+       btrfs_btree_balance_dirty(root, nr);
        return ret;
 }
 
 static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
                           char *name, int namelen)
 {
-       struct inode *inode;
        struct btrfs_pending_snapshot *pending_snapshot;
        struct btrfs_trans_handle *trans;
-       int ret;
+       int ret = 0;
+       int err;
+       unsigned long nr = 0;
 
        if (!root->ref_cows)
                return -EINVAL;
@@ -368,20 +372,20 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
         */
        ret = btrfs_reserve_metadata_space(root, 6);
        if (ret)
-               goto fail;
+               goto fail_unlock;
 
        pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
        if (!pending_snapshot) {
                ret = -ENOMEM;
                btrfs_unreserve_metadata_space(root, 6);
-               goto fail;
+               goto fail_unlock;
        }
        pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
        if (!pending_snapshot->name) {
                ret = -ENOMEM;
                kfree(pending_snapshot);
                btrfs_unreserve_metadata_space(root, 6);
-               goto fail;
+               goto fail_unlock;
        }
        memcpy(pending_snapshot->name, name, namelen);
        pending_snapshot->name[namelen] = '\0';
@@ -391,19 +395,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
        pending_snapshot->root = root;
        list_add(&pending_snapshot->list,
                 &trans->transaction->pending_snapshots);
-       ret = btrfs_commit_transaction(trans, root);
-       BUG_ON(ret);
-       btrfs_unreserve_metadata_space(root, 6);
+       err = btrfs_commit_transaction(trans, root);
 
-       inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
-       if (IS_ERR(inode)) {
-               ret = PTR_ERR(inode);
-               goto fail;
-       }
-       BUG_ON(!inode);
-       d_instantiate(dentry, inode);
-       ret = 0;
-fail:
+fail_unlock:
+       btrfs_btree_balance_dirty(root, nr);
        return ret;
 }
 
@@ -952,7 +947,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
         */
 
        /* the destination must be opened for writing */
-       if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
+       if (!(file->f_mode & FMODE_WRITE))
                return -EINVAL;
 
        ret = mnt_want_write(file->f_path.mnt);
@@ -964,17 +959,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                ret = -EBADF;
                goto out_drop_write;
        }
-
        src = src_file->f_dentry->d_inode;
 
        ret = -EINVAL;
        if (src == inode)
                goto out_fput;
 
-       /* the src must be open for reading */
-       if (!(src_file->f_mode & FMODE_READ))
-               goto out_fput;
-
        ret = -EISDIR;
        if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
                goto out_fput;
@@ -1005,7 +995,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
        /* determine range to clone */
        ret = -EINVAL;
-       if (off + len > src->i_size || off + len < off)
+       if (off >= src->i_size || off + len > src->i_size)
                goto out_unlock;
        if (len == 0)
                olen = len = src->i_size - off;
@@ -1037,7 +1027,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
        BUG_ON(!trans);
 
        /* punch hole in destination first */
-       btrfs_drop_extents(trans, inode, off, off + len, &hint_byte, 1);
+       btrfs_drop_extents(trans, root, inode, off, off + len,
+                          off + len, 0, &hint_byte, 1);
 
        /* clone data */
        key.objectid = src->i_ino;
index 5c2a9e78a949a03ff239db3b99843c258ce7cd84..5799bc46a30993a1cb477fb3a4dd9db23dcad6da 100644 (file)
@@ -291,16 +291,16 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 
 /*
  * remove an ordered extent from the tree.  No references are dropped
- * and you must wake_up entry->wait.  You must hold the tree mutex
- * while you call this function.
+ * but, anyone waiting on this extent is woken up.
  */
-static int __btrfs_remove_ordered_extent(struct inode *inode,
+int btrfs_remove_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry)
 {
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
 
        tree = &BTRFS_I(inode)->ordered_tree;
+       mutex_lock(&tree->mutex);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        tree->last = NULL;
@@ -326,34 +326,16 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
        }
        spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 
-       return 0;
-}
-
-/*
- * remove an ordered extent from the tree.  No references are dropped
- * but any waiters are woken.
- */
-int btrfs_remove_ordered_extent(struct inode *inode,
-                               struct btrfs_ordered_extent *entry)
-{
-       struct btrfs_ordered_inode_tree *tree;
-       int ret;
-
-       tree = &BTRFS_I(inode)->ordered_tree;
-       mutex_lock(&tree->mutex);
-       ret = __btrfs_remove_ordered_extent(inode, entry);
        mutex_unlock(&tree->mutex);
        wake_up(&entry->wait);
-
-       return ret;
+       return 0;
 }
 
 /*
  * wait for all the ordered extents in a root.  This is done when balancing
  * space between drives.
  */
-int btrfs_wait_ordered_extents(struct btrfs_root *root,
-                              int nocow_only, int delay_iput)
+int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
 {
        struct list_head splice;
        struct list_head *cur;
@@ -390,10 +372,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root,
                if (inode) {
                        btrfs_start_ordered_extent(inode, ordered, 1);
                        btrfs_put_ordered_extent(ordered);
-                       if (delay_iput)
-                               btrfs_add_delayed_iput(inode);
-                       else
-                               iput(inode);
+                       iput(inode);
                } else {
                        btrfs_put_ordered_extent(ordered);
                }
@@ -451,7 +430,7 @@ again:
                                btrfs_wait_ordered_range(inode, 0, (u64)-1);
                        else
                                filemap_flush(inode->i_mapping);
-                       btrfs_add_delayed_iput(inode);
+                       iput(inode);
                }
 
                cond_resched();
@@ -610,7 +589,7 @@ out:
  * After an extent is done, call this to conditionally update the on disk
  * i_size.  i_size is updated to cover any fully written part of the file.
  */
-int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
+int btrfs_ordered_update_i_size(struct inode *inode,
                                struct btrfs_ordered_extent *ordered)
 {
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
@@ -618,32 +597,18 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size_test;
-       u64 i_size = i_size_read(inode);
        struct rb_node *node;
-       struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
-       int ret = 1;
-
-       if (ordered)
-               offset = entry_end(ordered);
-       else
-               offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
 
        mutex_lock(&tree->mutex);
        disk_i_size = BTRFS_I(inode)->disk_i_size;
 
-       /* truncate file */
-       if (disk_i_size > i_size) {
-               BTRFS_I(inode)->disk_i_size = i_size;
-               ret = 0;
-               goto out;
-       }
-
        /*
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
-       if (disk_i_size == i_size || offset <= disk_i_size) {
+       if (disk_i_size >= inode->i_size ||
+           ordered->file_offset + ordered->len <= disk_i_size) {
                goto out;
        }
 
@@ -651,7 +616,8 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
         * we can't update the disk_isize if there are delalloc bytes
         * between disk_i_size and  this ordered extent
         */
-       if (test_range_bit(io_tree, disk_i_size, offset - 1,
+       if (test_range_bit(io_tree, disk_i_size,
+                          ordered->file_offset + ordered->len - 1,
                           EXTENT_DELALLOC, 0, NULL)) {
                goto out;
        }
@@ -660,32 +626,20 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
         * if we find an ordered extent then we can't update disk i_size
         * yet
         */
-       if (ordered) {
-               node = rb_prev(&ordered->rb_node);
-       } else {
-               prev = tree_search(tree, offset);
-               /*
-                * we insert file extents without involving ordered struct,
-                * so there should be no ordered struct cover this offset
-                */
-               if (prev) {
-                       test = rb_entry(prev, struct btrfs_ordered_extent,
-                                       rb_node);
-                       BUG_ON(offset_in_entry(test, offset));
-               }
-               node = prev;
-       }
-       while (node) {
+       node = &ordered->rb_node;
+       while (1) {
+               node = rb_prev(node);
+               if (!node)
+                       break;
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (test->file_offset + test->len <= disk_i_size)
                        break;
-               if (test->file_offset >= i_size)
+               if (test->file_offset >= inode->i_size)
                        break;
                if (test->file_offset >= disk_i_size)
                        goto out;
-               node = rb_prev(node);
        }
-       new_i_size = min_t(u64, offset, i_size);
+       new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));
 
        /*
         * at this point, we know we can safely update i_size to at least
@@ -693,14 +647,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
         * walk forward and see if ios from higher up in the file have
         * finished.
         */
-       if (ordered) {
-               node = rb_next(&ordered->rb_node);
-       } else {
-               if (prev)
-                       node = rb_next(prev);
-               else
-                       node = rb_first(&tree->tree);
-       }
+       node = rb_next(&ordered->rb_node);
        i_size_test = 0;
        if (node) {
                /*
@@ -708,10 +655,10 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                 * between our ordered extent and the next one.
                 */
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-               if (test->file_offset > offset)
+               if (test->file_offset > entry_end(ordered))
                        i_size_test = test->file_offset;
        } else {
-               i_size_test = i_size;
+               i_size_test = i_size_read(inode);
        }
 
        /*
@@ -720,25 +667,15 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
         * are no delalloc bytes in this area, it is safe to update
         * disk_i_size to the end of the region.
         */
-       if (i_size_test > offset &&
-           !test_range_bit(io_tree, offset, i_size_test - 1,
-                           EXTENT_DELALLOC, 0, NULL)) {
-               new_i_size = min_t(u64, i_size_test, i_size);
+       if (i_size_test > entry_end(ordered) &&
+           !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
+                          EXTENT_DELALLOC, 0, NULL)) {
+               new_i_size = min_t(u64, i_size_test, i_size_read(inode));
        }
        BTRFS_I(inode)->disk_i_size = new_i_size;
-       ret = 0;
 out:
-       /*
-        * we need to remove the ordered extent with the tree lock held
-        * so that other people calling this function don't find our fully
-        * processed ordered entry and skip updating the i_size
-        */
-       if (ordered)
-               __btrfs_remove_ordered_extent(inode, ordered);
        mutex_unlock(&tree->mutex);
-       if (ordered)
-               wake_up(&ordered->wait);
-       return ret;
+       return 0;
 }
 
 /*
index 1fe1282ef47c998aa9cde1c0c43e4f088e7e767c..f82e87488ca8f47d158c37281a3aafa746d5031d 100644 (file)
@@ -150,13 +150,12 @@ void btrfs_start_ordered_extent(struct inode *inode,
 int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
 struct btrfs_ordered_extent *
 btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
-int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
+int btrfs_ordered_update_i_size(struct inode *inode,
                                struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
+int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
 int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
 int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct inode *inode);
-int btrfs_wait_ordered_extents(struct btrfs_root *root,
-                              int nocow_only, int delay_iput);
 #endif
index ab7ab53187452aa7794bfcdfb5596385d3cc77b2..cfcc93c93a7b4db99b87ef3b91b30cc8d2f7e36b 100644 (file)
@@ -1561,20 +1561,6 @@ static int invalidate_extent_cache(struct btrfs_root *root,
        return 0;
 }
 
-static void put_inodes(struct list_head *list)
-{
-       struct inodevec *ivec;
-       while (!list_empty(list)) {
-               ivec = list_entry(list->next, struct inodevec, list);
-               list_del(&ivec->list);
-               while (ivec->nr > 0) {
-                       ivec->nr--;
-                       iput(ivec->inode[ivec->nr]);
-               }
-               kfree(ivec);
-       }
-}
-
 static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
 
@@ -1737,11 +1723,6 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 
                btrfs_btree_balance_dirty(root, nr);
 
-               /*
-                * put inodes outside transaction, otherwise we may deadlock.
-                */
-               put_inodes(&inode_list);
-
                if (replaced && rc->stage == UPDATE_DATA_PTRS)
                        invalidate_extent_cache(root, &key, &next_key);
        }
@@ -1771,7 +1752,19 @@ out:
 
        btrfs_btree_balance_dirty(root, nr);
 
-       put_inodes(&inode_list);
+       /*
+        * put inodes while we aren't holding the tree locks
+        */
+       while (!list_empty(&inode_list)) {
+               struct inodevec *ivec;
+               ivec = list_entry(inode_list.next, struct inodevec, list);
+               list_del(&ivec->list);
+               while (ivec->nr > 0) {
+                       ivec->nr--;
+                       iput(ivec->inode[ivec->nr]);
+               }
+               kfree(ivec);
+       }
 
        if (replaced && rc->stage == UPDATE_DATA_PTRS)
                invalidate_extent_cache(root, &key, &next_key);
@@ -3281,10 +3274,8 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
                return -ENOMEM;
 
        path = btrfs_alloc_path();
-       if (!path) {
-               kfree(cluster);
+       if (!path)
                return -ENOMEM;
-       }
 
        rc->extents_found = 0;
        rc->extents_skipped = 0;
@@ -3543,8 +3534,8 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
               (unsigned long long)rc->block_group->key.objectid,
               (unsigned long long)rc->block_group->flags);
 
-       btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
-       btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0);
+       btrfs_start_delalloc_inodes(fs_info->tree_root);
+       btrfs_wait_ordered_extents(fs_info->tree_root, 0);
 
        while (1) {
                rc->extents_found = 0;
@@ -3764,8 +3755,6 @@ out:
                                       BTRFS_DATA_RELOC_TREE_OBJECTID);
                if (IS_ERR(fs_root))
                        err = PTR_ERR(fs_root);
-               else
-                       btrfs_orphan_cleanup(fs_root);
        }
        return err;
 }
index a649305b205968e024a967555520581c00f3356e..752a5463bf53aee147068c2a0a9f678e12250f38 100644 (file)
@@ -126,9 +126,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 {
        struct btrfs_fs_info *info = root->fs_info;
        substring_t args[MAX_OPT_ARGS];
-       char *p, *num, *orig;
+       char *p, *num;
        int intarg;
-       int ret = 0;
 
        if (!options)
                return 0;
@@ -141,7 +140,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
        if (!options)
                return -ENOMEM;
 
-       orig = options;
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
@@ -264,18 +262,12 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_discard:
                        btrfs_set_opt(info->mount_opt, DISCARD);
                        break;
-               case Opt_err:
-                       printk(KERN_INFO "btrfs: unrecognized mount option "
-                              "'%s'\n", p);
-                       ret = -EINVAL;
-                       goto out;
                default:
                        break;
                }
        }
-out:
-       kfree(orig);
-       return ret;
+       kfree(options);
+       return 0;
 }
 
 /*
@@ -413,8 +405,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
                return 0;
        }
 
-       btrfs_start_delalloc_inodes(root, 0);
-       btrfs_wait_ordered_extents(root, 0, 0);
+       btrfs_start_delalloc_inodes(root);
+       btrfs_wait_ordered_extents(root, 0);
 
        trans = btrfs_start_transaction(root, 1);
        ret = btrfs_commit_transaction(trans, root);
@@ -458,8 +450,6 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",notreelog");
        if (btrfs_test_opt(root, FLUSHONCOMMIT))
                seq_puts(seq, ",flushoncommit");
-       if (btrfs_test_opt(root, DISCARD))
-               seq_puts(seq, ",discard");
        if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
                seq_puts(seq, ",noacl");
        return 0;
index b2acc79f1b342e651a4c31da168c2952efa57ff2..c207e8c32c9bfc5212e111edb440adf4b59f2059 100644 (file)
@@ -333,9 +333,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
-       if (throttle)
-               btrfs_run_delayed_iputs(root);
-
        return 0;
 }
 
@@ -357,7 +354,7 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
  * those extents are sent to disk but does not wait on them
  */
 int btrfs_write_marked_extents(struct btrfs_root *root,
-                              struct extent_io_tree *dirty_pages, int mark)
+                              struct extent_io_tree *dirty_pages)
 {
        int ret;
        int err = 0;
@@ -370,7 +367,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
 
        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
-                                           mark);
+                                           EXTENT_DIRTY);
                if (ret)
                        break;
                while (start <= end) {
@@ -416,7 +413,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
  * on all the pages and clear them from the dirty pages state tree
  */
 int btrfs_wait_marked_extents(struct btrfs_root *root,
-                             struct extent_io_tree *dirty_pages, int mark)
+                             struct extent_io_tree *dirty_pages)
 {
        int ret;
        int err = 0;
@@ -428,12 +425,12 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
        unsigned long index;
 
        while (1) {
-               ret = find_first_extent_bit(dirty_pages, start, &start, &end,
-                                           mark);
+               ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
+                                           EXTENT_DIRTY);
                if (ret)
                        break;
 
-               clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+               clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
                while (start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
@@ -463,13 +460,13 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
  * those extents are on disk for transaction or log commit
  */
 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
-                               struct extent_io_tree *dirty_pages, int mark)
+                                       struct extent_io_tree *dirty_pages)
 {
        int ret;
        int ret2;
 
-       ret = btrfs_write_marked_extents(root, dirty_pages, mark);
-       ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
+       ret = btrfs_write_marked_extents(root, dirty_pages);
+       ret2 = btrfs_wait_marked_extents(root, dirty_pages);
        return ret || ret2;
 }
 
@@ -482,8 +479,7 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
-                                          &trans->transaction->dirty_pages,
-                                          EXTENT_DIRTY);
+                                          &trans->transaction->dirty_pages);
 }
 
 /*
@@ -501,16 +497,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 {
        int ret;
        u64 old_root_bytenr;
-       u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
 
-       old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);
 
        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
-               if (old_root_bytenr == root->node->start &&
-                   old_root_used == btrfs_root_used(&root->root_item))
+               if (old_root_bytenr == root->node->start)
                        break;
 
                btrfs_set_root_node(&root->root_item, root->node);
@@ -519,7 +512,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
                                        &root->root_item);
                BUG_ON(ret);
 
-               old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                BUG_ON(ret);
        }
@@ -803,6 +795,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        memcpy(&pending->root_key, &key, sizeof(key));
 fail:
        kfree(new_root_item);
+       btrfs_unreserve_metadata_space(root, 6);
        return ret;
 }
 
@@ -814,6 +807,7 @@ static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
        u64 index = 0;
        struct btrfs_trans_handle *trans;
        struct inode *parent_inode;
+       struct inode *inode;
        struct btrfs_root *parent_root;
 
        parent_inode = pending->dentry->d_parent->d_inode;
@@ -845,6 +839,8 @@ static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
 
        BUG_ON(ret);
 
+       inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
+       d_instantiate(pending->dentry, inode);
 fail:
        btrfs_end_transaction(trans, fs_info->fs_root);
        return ret;
@@ -998,11 +994,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                mutex_unlock(&root->fs_info->trans_mutex);
 
                if (flush_on_commit) {
-                       btrfs_start_delalloc_inodes(root, 1);
-                       ret = btrfs_wait_ordered_extents(root, 0, 1);
+                       btrfs_start_delalloc_inodes(root);
+                       ret = btrfs_wait_ordered_extents(root, 0);
                        BUG_ON(ret);
                } else if (snap_pending) {
-                       ret = btrfs_wait_ordered_extents(root, 0, 1);
+                       ret = btrfs_wait_ordered_extents(root, 1);
                        BUG_ON(ret);
                }
 
@@ -1120,10 +1116,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                current->journal_info = NULL;
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
-
-       if (current != root->fs_info->transaction_kthread)
-               btrfs_run_delayed_iputs(root);
-
        return ret;
 }
 
index 93c7ccb33118f38d420c00c3b790fe0040b7f0ca..d4e3e7a6938cddebb56e05ae4131b7055f08be05 100644 (file)
@@ -107,10 +107,10 @@ void btrfs_throttle(struct btrfs_root *root);
 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root);
 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
-                               struct extent_io_tree *dirty_pages, int mark);
+                                       struct extent_io_tree *dirty_pages);
 int btrfs_write_marked_extents(struct btrfs_root *root,
-                               struct extent_io_tree *dirty_pages, int mark);
+                                       struct extent_io_tree *dirty_pages);
 int btrfs_wait_marked_extents(struct btrfs_root *root,
-                               struct extent_io_tree *dirty_pages, int mark);
+                                       struct extent_io_tree *dirty_pages);
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
 #endif
index 4a9434b622ecfc0f571c32ea3abf54df3d878123..741666a7676a80a6553f5b22a37a41dd31971d3e 100644 (file)
@@ -542,8 +542,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 
        saved_nbytes = inode_get_bytes(inode);
        /* drop any overlapping extents */
-       ret = btrfs_drop_extents(trans, inode, start, extent_end,
-                                &alloc_hint, 1);
+       ret = btrfs_drop_extents(trans, root, inode,
+                        start, extent_end, extent_end, start, &alloc_hint, 1);
        BUG_ON(ret);
 
        if (found_type == BTRFS_FILE_EXTENT_REG ||
@@ -930,17 +930,6 @@ out_nowrite:
        return 0;
 }
 
-static int insert_orphan_item(struct btrfs_trans_handle *trans,
-                             struct btrfs_root *root, u64 offset)
-{
-       int ret;
-       ret = btrfs_find_orphan_item(root, offset);
-       if (ret > 0)
-               ret = btrfs_insert_orphan_item(trans, root, offset);
-       return ret;
-}
-
-
 /*
  * There are a few corners where the link count of the file can't
  * be properly maintained during replay.  So, instead of adding
@@ -1008,13 +997,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
        }
        BTRFS_I(inode)->index_cnt = (u64)-1;
 
-       if (inode->i_nlink == 0) {
-               if (S_ISDIR(inode->i_mode)) {
-                       ret = replay_dir_deletes(trans, root, NULL, path,
-                                                inode->i_ino, 1);
-                       BUG_ON(ret);
-               }
-               ret = insert_orphan_item(trans, root, inode->i_ino);
+       if (inode->i_nlink == 0 && S_ISDIR(inode->i_mode)) {
+               ret = replay_dir_deletes(trans, root, NULL, path,
+                                        inode->i_ino, 1);
                BUG_ON(ret);
        }
        btrfs_free_path(path);
@@ -1602,6 +1587,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                /* inode keys are done during the first stage */
                if (key.type == BTRFS_INODE_ITEM_KEY &&
                    wc->stage == LOG_WALK_REPLAY_INODES) {
+                       struct inode *inode;
                        struct btrfs_inode_item *inode_item;
                        u32 mode;
 
@@ -1617,16 +1603,31 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                                             eb, i, &key);
                        BUG_ON(ret);
 
-                       /* for regular files, make sure corresponding
-                        * orhpan item exist. extents past the new EOF
-                        * will be truncated later by orphan cleanup.
+                       /* for regular files, truncate away
+                        * extents past the new EOF
                         */
                        if (S_ISREG(mode)) {
-                               ret = insert_orphan_item(wc->trans, root,
-                                                        key.objectid);
+                               inode = read_one_inode(root,
+                                                      key.objectid);
+                               BUG_ON(!inode);
+
+                               ret = btrfs_truncate_inode_items(wc->trans,
+                                       root, inode, inode->i_size,
+                                       BTRFS_EXTENT_DATA_KEY);
                                BUG_ON(ret);
-                       }
 
+                               /* if the nlink count is zero here, the iput
+                                * will free the inode.  We bump it to make
+                                * sure it doesn't get freed until the link
+                                * count fixup is done
+                                */
+                               if (inode->i_nlink == 0) {
+                                       btrfs_inc_nlink(inode);
+                                       btrfs_update_inode(wc->trans,
+                                                          root, inode);
+                               }
+                               iput(inode);
+                       }
                        ret = link_to_fixup_dir(wc->trans, root,
                                                path, key.objectid);
                        BUG_ON(ret);
@@ -1976,11 +1977,10 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 {
        int index1;
        int index2;
-       int mark;
        int ret;
        struct btrfs_root *log = root->log_root;
        struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
-       unsigned long log_transid = 0;
+       u64 log_transid = 0;
 
        mutex_lock(&root->log_mutex);
        index1 = root->log_transid % 2;
@@ -2014,29 +2014,24 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                goto out;
        }
 
-       log_transid = root->log_transid;
-       if (log_transid % 2 == 0)
-               mark = EXTENT_DIRTY;
-       else
-               mark = EXTENT_NEW;
-
        /* we start IO on  all the marked extents here, but we don't actually
         * wait for them until later.
         */
-       ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
+       ret = btrfs_write_marked_extents(log, &log->dirty_log_pages);
        BUG_ON(ret);
 
        btrfs_set_root_node(&log->root_item, log->node);
 
        root->log_batch = 0;
+       log_transid = root->log_transid;
        root->log_transid++;
        log->log_transid = root->log_transid;
        root->log_start_pid = 0;
        smp_mb();
        /*
-        * IO has been started, blocks of the log tree have WRITTEN flag set
-        * in their headers. new modifications of the log will be written to
-        * new positions. so it's safe to allow log writers to go in.
+        * log tree has been flushed to disk, new modifications of
+        * the log will be written to new positions. so it's safe to
+        * allow log writers to go in.
         */
        mutex_unlock(&root->log_mutex);
 
@@ -2057,7 +2052,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
        index2 = log_root_tree->log_transid % 2;
        if (atomic_read(&log_root_tree->log_commit[index2])) {
-               btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+               btrfs_wait_marked_extents(log, &log->dirty_log_pages);
                wait_log_commit(trans, log_root_tree,
                                log_root_tree->log_transid);
                mutex_unlock(&log_root_tree->log_mutex);
@@ -2077,17 +2072,16 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         * check the full commit flag again
         */
        if (root->fs_info->last_trans_log_full_commit == trans->transid) {
-               btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+               btrfs_wait_marked_extents(log, &log->dirty_log_pages);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = -EAGAIN;
                goto out_wake_log_root;
        }
 
        ret = btrfs_write_and_wait_marked_extents(log_root_tree,
-                               &log_root_tree->dirty_log_pages,
-                               EXTENT_DIRTY | EXTENT_NEW);
+                               &log_root_tree->dirty_log_pages);
        BUG_ON(ret);
-       btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+       btrfs_wait_marked_extents(log, &log->dirty_log_pages);
 
        btrfs_set_super_log_root(&root->fs_info->super_for_commit,
                                log_root_tree->node->start);
@@ -2153,12 +2147,12 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
 
        while (1) {
                ret = find_first_extent_bit(&log->dirty_log_pages,
-                               0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
+                                   0, &start, &end, EXTENT_DIRTY);
                if (ret)
                        break;
 
-               clear_extent_bits(&log->dirty_log_pages, start, end,
-                                 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
+               clear_extent_dirty(&log->dirty_log_pages,
+                                  start, end, GFP_NOFS);
        }
 
        if (log->log_transid > 0) {
index 41ecbb2347f2d3171f7645782ab4e5a39621c84d..7eda483d7b5aa0cd5e5bd96d149cca6fe2fc8f16 100644 (file)
@@ -1135,7 +1135,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                root->fs_info->avail_metadata_alloc_bits;
 
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
-           root->fs_info->fs_devices->num_devices <= 4) {
+           root->fs_info->fs_devices->rw_devices <= 4) {
                printk(KERN_ERR "btrfs: unable to go below four devices "
                       "on raid10\n");
                ret = -EINVAL;
@@ -1143,7 +1143,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        }
 
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
-           root->fs_info->fs_devices->num_devices <= 2) {
+           root->fs_info->fs_devices->rw_devices <= 2) {
                printk(KERN_ERR "btrfs: unable to go below two "
                       "devices on raid1\n");
                ret = -EINVAL;
@@ -1434,8 +1434,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                return -EINVAL;
 
        bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
-       if (IS_ERR(bdev))
-               return PTR_ERR(bdev);
+       if (!bdev)
+               return -EIO;
 
        if (root->fs_info->fs_devices->seeding) {
                seeding_dev = 1;
@@ -2209,7 +2209,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                max_chunk_size = 10 * calc_size;
                min_stripe_size = 64 * 1024 * 1024;
        } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
-               max_chunk_size = 256 * 1024 * 1024;
+               max_chunk_size = 4 * calc_size;
                min_stripe_size = 32 * 1024 * 1024;
        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                calc_size = 8 * 1024 * 1024;
@@ -2538,11 +2538,6 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
        if (!em)
                return 1;
 
-       if (btrfs_test_opt(root, DEGRADED)) {
-               free_extent_map(em);
-               return 0;
-       }
-
        map = (struct map_lookup *)em->bdev;
        for (i = 0; i < map->num_stripes; i++) {
                if (!map->stripes[i].dev->writeable) {
@@ -2654,10 +2649,8 @@ again:
        em = lookup_extent_mapping(em_tree, logical, *length);
        read_unlock(&em_tree->lock);
 
-       if (!em && unplug_page) {
-               kfree(multi);
+       if (!em && unplug_page)
                return 0;
-       }
 
        if (!em) {
                printk(KERN_CRIT "unable to find logical %llu len %llu\n",
index 193b58f7d3f3b36d027a85756c917aa7191f99b2..b6dd5967c48a2785074f75db2dacd6a7370a9ec2 100644 (file)
@@ -85,23 +85,22 @@ out:
        return ret;
 }
 
-static int do_setxattr(struct btrfs_trans_handle *trans,
-                      struct inode *inode, const char *name,
-                      const void *value, size_t size, int flags)
+int __btrfs_setxattr(struct inode *inode, const char *name,
+                           const void *value, size_t size, int flags)
 {
        struct btrfs_dir_item *di;
        struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
-       size_t name_len = strlen(name);
-       int ret = 0;
-
-       if (name_len + size > BTRFS_MAX_XATTR_SIZE(root))
-               return -ENOSPC;
+       int ret = 0, mod = 0;
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
+       trans = btrfs_join_transaction(root, 1);
+       btrfs_set_trans_block_group(trans, inode);
+
        /* first lets see if we already have this xattr */
        di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
                                strlen(name), -1);
@@ -119,12 +118,15 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
                }
 
                ret = btrfs_delete_one_dir_name(trans, root, path, di);
-               BUG_ON(ret);
+               if (ret)
+                       goto out;
                btrfs_release_path(root, path);
 
                /* if we don't have a value then we are removing the xattr */
-               if (!value)
+               if (!value) {
+                       mod = 1;
                        goto out;
+               }
        } else {
                btrfs_release_path(root, path);
 
@@ -136,45 +138,20 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
        }
 
        /* ok we have to create a completely new xattr */
-       ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino,
-                                     name, name_len, value, size);
-       BUG_ON(ret);
-out:
-       btrfs_free_path(path);
-       return ret;
-}
-
-int __btrfs_setxattr(struct btrfs_trans_handle *trans,
-                    struct inode *inode, const char *name,
-                    const void *value, size_t size, int flags)
-{
-       struct btrfs_root *root = BTRFS_I(inode)->root;
-       int ret;
-
-       if (trans)
-               return do_setxattr(trans, inode, name, value, size, flags);
-
-       ret = btrfs_reserve_metadata_space(root, 2);
+       ret = btrfs_insert_xattr_item(trans, root, name, strlen(name),
+                                     value, size, inode->i_ino);
        if (ret)
-               return ret;
-
-       trans = btrfs_start_transaction(root, 1);
-       if (!trans) {
-               ret = -ENOMEM;
                goto out;
-       }
-       btrfs_set_trans_block_group(trans, inode);
+       mod = 1;
 
-       ret = do_setxattr(trans, inode, name, value, size, flags);
-       if (ret)
-               goto out;
-
-       inode->i_ctime = CURRENT_TIME;
-       ret = btrfs_update_inode(trans, root, inode);
-       BUG_ON(ret);
 out:
-       btrfs_end_transaction_throttle(trans, root);
-       btrfs_unreserve_metadata_space(root, 2);
+       if (mod) {
+               inode->i_ctime = CURRENT_TIME;
+               ret = btrfs_update_inode(trans, root, inode);
+       }
+
+       btrfs_end_transaction(trans, root);
+       btrfs_free_path(path);
        return ret;
 }
 
@@ -337,9 +314,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
 
        if (size == 0)
                value = "";  /* empty EA, do not remove */
-
-       return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size,
-                               flags);
+       return __btrfs_setxattr(dentry->d_inode, name, value, size, flags);
 }
 
 int btrfs_removexattr(struct dentry *dentry, const char *name)
@@ -354,13 +329,10 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
 
        if (!btrfs_is_valid_xattr(name))
                return -EOPNOTSUPP;
-
-       return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0,
-                               XATTR_REPLACE);
+       return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
 }
 
-int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
-                             struct inode *inode, struct inode *dir)
+int btrfs_xattr_security_init(struct inode *inode, struct inode *dir)
 {
        int err;
        size_t len;
@@ -382,7 +354,7 @@ int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
        } else {
                strcpy(name, XATTR_SECURITY_PREFIX);
                strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
-               err = __btrfs_setxattr(trans, inode, name, value, len, 0);
+               err = __btrfs_setxattr(inode, name, value, len, 0);
                kfree(name);
        }
 
index 721efa0346e037c2f6f32c91d7f284fbc580c87f..c71e9c3cf3f749e8981d19433bef8ada685383aa 100644 (file)
@@ -27,16 +27,15 @@ extern struct xattr_handler *btrfs_xattr_handlers[];
 
 extern ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
                void *buffer, size_t size);
-extern int __btrfs_setxattr(struct btrfs_trans_handle *trans,
-                           struct inode *inode, const char *name,
-                           const void *value, size_t size, int flags);
+extern int __btrfs_setxattr(struct inode *inode, const char *name,
+               const void *value, size_t size, int flags);
+
 extern ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
                void *buffer, size_t size);
 extern int btrfs_setxattr(struct dentry *dentry, const char *name,
                const void *value, size_t size, int flags);
 extern int btrfs_removexattr(struct dentry *dentry, const char *name);
 
-extern int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
-                                    struct inode *inode, struct inode *dir);
+extern int btrfs_xattr_security_init(struct inode *inode, struct inode *dir);
 
 #endif /* __XATTR__ */
index 039b5011d83b0f1481ec200f1229db568f733a02..b5808cdb22325f0f21a1442910690772ad60431c 100644 (file)
@@ -77,8 +77,6 @@ static int cachefiles_check_cache_dir(struct cachefiles_cache *cache,
 /*
  * check the security details of the on-disk cache
  * - must be called with security override in force
- * - must return with a security override in force - even in the case of an
- *   error
  */
 int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
                                        struct dentry *root,
@@ -101,8 +99,6 @@ int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
         * which create files */
        ret = set_create_files_as(new, root->d_inode);
        if (ret < 0) {
-               abort_creds(new);
-               cachefiles_begin_secure(cache, _saved_cred);
                _leave(" = %d [cfa]", ret);
                return ret;
        }
index be7613ebb6618fcb25bd2de01f44ffbac554f6cb..d6db933df2b27ce43c0fe31d6e35bb0968598891 100644 (file)
@@ -39,9 +39,7 @@ struct backing_dev_info directly_mappable_cdev_bdi = {
 #endif
                /* permit direct mmap, for read, write or exec */
                BDI_CAP_MAP_DIRECT |
-               BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
-               /* no writeback happens */
-               BDI_CAP_NO_ACCT_AND_WRITEBACK),
+               BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
 };
 
 static struct kobj_map *cdev_map;
index 144540764dec7624943d5e4c3677a4299f7e0219..29f1da761bbf10fc948f16e0c77a0d75dea22125 100644 (file)
@@ -1033,7 +1033,7 @@ init_cifs(void)
                goto out_unregister_filesystem;
 #endif
 #ifdef CONFIG_CIFS_DFS_UPCALL
-       rc = cifs_init_dns_resolver();
+       rc = register_key_type(&key_type_dns_resolver);
        if (rc)
                goto out_unregister_key_type;
 #endif
@@ -1045,7 +1045,7 @@ init_cifs(void)
 
  out_unregister_resolver_key:
 #ifdef CONFIG_CIFS_DFS_UPCALL
-       cifs_exit_dns_resolver();
+       unregister_key_type(&key_type_dns_resolver);
  out_unregister_key_type:
 #endif
 #ifdef CONFIG_CIFS_UPCALL
@@ -1071,7 +1071,7 @@ exit_cifs(void)
        cifs_proc_clean();
 #ifdef CONFIG_CIFS_DFS_UPCALL
        cifs_dfs_release_automount_timer();
-       cifs_exit_dns_resolver();
+       unregister_key_type(&key_type_dns_resolver);
 #endif
 #ifdef CONFIG_CIFS_UPCALL
        unregister_key_type(&cifs_spnego_key_type);
index c4dbc633361f35a2df0f8cffe18870d7ef4f159b..5d0fde18039c6780ed8e0dcad3ea3fe7ca07a094 100644 (file)
@@ -499,7 +499,6 @@ struct dfs_info3_param {
 #define CIFS_FATTR_DFS_REFERRAL                0x1
 #define CIFS_FATTR_DELETE_PENDING      0x2
 #define CIFS_FATTR_NEED_REVAL          0x4
-#define CIFS_FATTR_INO_COLLISION       0x8
 
 struct cifs_fattr {
        u32             cf_flags;
index 05a9b776e1a8132a3853a09f20ae13d8f79abd8e..5646727e33f543f70ec449b390d8955b66852dc9 100644 (file)
@@ -95,10 +95,8 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
                                __u16 fileHandle, struct file *file,
                                struct vfsmount *mnt, unsigned int oflags);
 extern int cifs_posix_open(char *full_path, struct inode **pinode,
-                               struct vfsmount *mnt,
-                               struct super_block *sb,
-                               int mode, int oflags,
-                               __u32 *poplock, __u16 *pnetfid, int xid);
+                          struct vfsmount *mnt, int mode, int oflags,
+                          __u32 *poplock, __u16 *pnetfid, int xid);
 extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
                                     FILE_UNIX_BASIC_INFO *info,
                                     struct cifs_sb_info *cifs_sb);
index 4e6dbab84435b6b47bbcbd6b1e04b1a8729b1026..941441d3e3860163a51dd5f737cfe9771f283fa9 100644 (file)
@@ -1430,8 +1430,6 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
        __u32 bytes_sent;
        __u16 byte_count;
 
-       *nbytes = 0;
-
        /* cFYI(1, ("write at %lld %d bytes", offset, count));*/
        if (tcon->ses == NULL)
                return -ECONNABORTED;
@@ -1514,18 +1512,11 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
        cifs_stats_inc(&tcon->num_writes);
        if (rc) {
                cFYI(1, ("Send error in write = %d", rc));
+               *nbytes = 0;
        } else {
                *nbytes = le16_to_cpu(pSMBr->CountHigh);
                *nbytes = (*nbytes) << 16;
                *nbytes += le16_to_cpu(pSMBr->Count);
-
-               /*
-                * Mask off high 16 bits when bytes written as returned by the
-                * server is greater than bytes requested by the client. Some
-                * OS/2 servers are known to set incorrect CountHigh values.
-                */
-               if (*nbytes > count)
-                       *nbytes &= 0xFFFF;
        }
 
        cifs_buf_release(pSMB);
@@ -1614,14 +1605,6 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
                *nbytes = le16_to_cpu(pSMBr->CountHigh);
                *nbytes = (*nbytes) << 16;
                *nbytes += le16_to_cpu(pSMBr->Count);
-
-               /*
-                * Mask off high 16 bits when bytes written as returned by the
-                * server is greater than bytes requested by the client. OS/2
-                * servers are known to set incorrect CountHigh values.
-                */
-               if (*nbytes > count)
-                       *nbytes &= 0xFFFF;
        }
 
 /*     cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
index c3d6182d0ebe4a6cb926482a66694791eced5db4..1f42f772865a54f1f45b516b090533a4a4715bf4 100644 (file)
@@ -183,14 +183,13 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
 }
 
 int cifs_posix_open(char *full_path, struct inode **pinode,
-                       struct vfsmount *mnt, struct super_block *sb,
-                       int mode, int oflags,
-                       __u32 *poplock, __u16 *pnetfid, int xid)
+                   struct vfsmount *mnt, int mode, int oflags,
+                   __u32 *poplock, __u16 *pnetfid, int xid)
 {
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
-       struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+       struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
        struct cifs_fattr fattr;
 
        cFYI(1, ("posix open %s", full_path));
@@ -242,7 +241,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
 
        /* get new inode and set it up */
        if (*pinode == NULL) {
-               *pinode = cifs_iget(sb, &fattr);
+               *pinode = cifs_iget(mnt->mnt_sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
@@ -251,8 +250,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
                cifs_fattr_to_inode(*pinode, &fattr);
        }
 
-       if (mnt)
-               cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
+       cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
 
 posix_open_ret:
        kfree(presp_data);
@@ -316,14 +314,13 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
        if (nd && (nd->flags & LOOKUP_OPEN))
                oflags = nd->intent.open.flags;
        else
-               oflags = FMODE_READ | SMB_O_CREAT;
+               oflags = FMODE_READ;
 
        if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
-               rc = cifs_posix_open(full_path, &newinode,
-                       nd ? nd->path.mnt : NULL,
-                       inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
+               rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
+                                    mode, oflags, &oplock, &fileHandle, xid);
                /* EIO could indicate that (posix open) operation is not
                   supported, despite what server claimed in capability
                   negotation.  EREMOTE indicates DFS junction, which is not
@@ -680,7 +677,6 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
                     (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
                     (nd->intent.open.flags & O_CREAT)) {
                        rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
-                                       parent_dir_inode->i_sb,
                                        nd->intent.open.create_mode,
                                        nd->intent.open.flags, &oplock,
                                        &fileHandle, xid);
index 31da21f0654cf1aa0249e35942a700f94388b1f7..87948147d7ece68d64e7218c8331624410ac2196 100644 (file)
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#include <linux/keyctl.h>
-#include <linux/key-type.h>
 #include <keys/user-type.h>
 #include "dns_resolve.h"
 #include "cifsglob.h"
 #include "cifsproto.h"
 #include "cifs_debug.h"
 
-static const struct cred *dns_resolver_cache;
-
 /* Checks if supplied name is IP address
  * returns:
  *             1 - name is IP
@@ -97,7 +93,6 @@ struct key_type key_type_dns_resolver = {
 int
 dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
 {
-       const struct cred *saved_cred;
        int rc = -EAGAIN;
        struct key *rkey = ERR_PTR(-EAGAIN);
        char *name;
@@ -137,15 +132,8 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
                goto skip_upcall;
        }
 
-       saved_cred = override_creds(dns_resolver_cache);
        rkey = request_key(&key_type_dns_resolver, name, "");
-       revert_creds(saved_cred);
        if (!IS_ERR(rkey)) {
-               if (!(rkey->perm & KEY_USR_VIEW)) {
-                       down_read(&rkey->sem);
-                       rkey->perm |= KEY_USR_VIEW;
-                       up_read(&rkey->sem);
-               }
                len = rkey->type_data.x[0];
                data = rkey->payload.data;
        } else {
@@ -176,61 +164,4 @@ out:
        return rc;
 }
 
-int __init cifs_init_dns_resolver(void)
-{
-       struct cred *cred;
-       struct key *keyring;
-       int ret;
-
-       printk(KERN_NOTICE "Registering the %s key type\n",
-              key_type_dns_resolver.name);
-
-       /* create an override credential set with a special thread keyring in
-        * which DNS requests are cached
-        *
-        * this is used to prevent malicious redirections from being installed
-        * with add_key().
-        */
-       cred = prepare_kernel_cred(NULL);
-       if (!cred)
-               return -ENOMEM;
-
-       keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred,
-                           (KEY_POS_ALL & ~KEY_POS_SETATTR) |
-                           KEY_USR_VIEW | KEY_USR_READ,
-                           KEY_ALLOC_NOT_IN_QUOTA);
-       if (IS_ERR(keyring)) {
-               ret = PTR_ERR(keyring);
-               goto failed_put_cred;
-       }
-
-       ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
-       if (ret < 0)
-               goto failed_put_key;
-
-       ret = register_key_type(&key_type_dns_resolver);
-       if (ret < 0)
-               goto failed_put_key;
-
-       /* instruct request_key() to use this special keyring as a cache for
-        * the results it looks up */
-       cred->thread_keyring = keyring;
-       cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
-       dns_resolver_cache = cred;
-       return 0;
-
-failed_put_key:
-       key_put(keyring);
-failed_put_cred:
-       put_cred(cred);
-       return ret;
-}
 
-void cifs_exit_dns_resolver(void)
-{
-       key_revoke(dns_resolver_cache->thread_keyring);
-       unregister_key_type(&key_type_dns_resolver);
-       put_cred(dns_resolver_cache);
-       printk(KERN_NOTICE "Unregistered %s key type\n",
-              key_type_dns_resolver.name);
-}
index 763237aa2a23e0bcd3fc0ed65fcd6e0ee7c2ea8d..966e9288930be75bc9d952cde2960ef3191db212 100644 (file)
 #define _DNS_RESOLVE_H
 
 #ifdef __KERNEL__
-#include <linux/module.h>
-
-extern int __init cifs_init_dns_resolver(void);
-extern void cifs_exit_dns_resolver(void);
+#include <linux/key-type.h>
+extern struct key_type key_type_dns_resolver;
 extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
 #endif /* KERNEL */
 
index 5d1099a20cceda7f33a85869bf810a0b4f51af55..429337eb7afec9f0933c72927eedcc7412207c88 100644 (file)
@@ -295,12 +295,10 @@ int cifs_open(struct inode *inode, struct file *file)
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                int oflags = (int) cifs_posix_convert_flags(file->f_flags);
-               oflags |= SMB_O_CREAT;
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
-                               inode->i_sb,
-                               cifs_sb->mnt_file_mode /* ignored */,
-                               oflags, &oplock, &netfid, xid);
+                                    cifs_sb->mnt_file_mode /* ignored */,
+                                    oflags, &oplock, &netfid, xid);
                if (rc == 0) {
                        cFYI(1, ("posix open succeeded"));
                        /* no need for special case handling of setting mode
@@ -512,9 +510,8 @@ reopen_error_exit:
                int oflags = (int) cifs_posix_convert_flags(file->f_flags);
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
-                               inode->i_sb,
-                               cifs_sb->mnt_file_mode /* ignored */,
-                               oflags, &oplock, &netfid, xid);
+                                    cifs_sb->mnt_file_mode /* ignored */,
+                                    oflags, &oplock, &netfid, xid);
                if (rc == 0) {
                        cFYI(1, ("posix reopen succeeded"));
                        goto reopen_success;
index 303fd7f4dfe1a37a02dac79d38aae5bd7ea2e4c6..cababd8a52df32097f3219327ef491254ced34f0 100644 (file)
@@ -610,16 +610,6 @@ cifs_find_inode(struct inode *inode, void *opaque)
        if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
                return 0;
 
-       /*
-        * uh oh -- it's a directory. We can't use it since hardlinked dirs are
-        * verboten. Disable serverino and return it as if it were found, the
-        * caller can discard it, generate a uniqueid and retry the find
-        */
-       if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
-               fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
-               cifs_autodisable_serverino(CIFS_SB(inode->i_sb));
-       }
-
        return 1;
 }
 
@@ -639,22 +629,15 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
        unsigned long hash;
        struct inode *inode;
 
-retry_iget5_locked:
        cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid));
 
        /* hash down to 32-bits on 32-bit arch */
        hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
 
        inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
-       if (inode) {
-               /* was there a problematic inode number collision? */
-               if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
-                       iput(inode);
-                       fattr->cf_uniqueid = iunique(sb, ROOT_I);
-                       fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;
-                       goto retry_iget5_locked;
-               }
 
+       /* we have fattrs in hand, update the inode */
+       if (inode) {
                cifs_fattr_to_inode(inode, fattr);
                if (sb->s_flags & MS_NOATIME)
                        inode->i_flags |= S_NOATIME | S_NOCMTIME;
@@ -1284,10 +1267,6 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
        if (rc == 0 || rc != -ETXTBSY)
                return rc;
 
-       /* open-file renames don't work across directories */
-       if (to_dentry->d_parent != from_dentry->d_parent)
-               return rc;
-
        /* open the file to be renamed -- we need DELETE perms */
        rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE,
                         CREATE_NOT_DIR, &srcfid, &oplock, NULL,
index 6d6ff4fe60ea4fdfdfffb1c6718746266028d92b..7085a6275c4c9f0635383033dde2ee572bb046b0 100644 (file)
@@ -723,7 +723,15 @@ ssetup_ntlmssp_authenticate:
 
                /* calculate session key */
                setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
-               /* FIXME: calculate MAC key */
+               if (first_time) /* should this be moved into common code
+                                  with similar ntlmv2 path? */
+               /*   cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
+                               response BB FIXME, v2_sess_key); */
+
+               /* copy session key */
+
+       /*      memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
+               bcc_ptr += LM2_SESS_KEY_SIZE; */
                memcpy(bcc_ptr, (char *)v2_sess_key,
                       sizeof(struct ntlmv2_resp));
                bcc_ptr += sizeof(struct ntlmv2_resp);
index d576b552e8e2eb90a98324e79b7d8917fb96c04c..6c19040ffeefecdd520fc1093b5ed9db31e8aa7f 100644 (file)
@@ -1532,6 +1532,8 @@ int compat_do_execve(char * filename,
        if (retval < 0)
                goto out;
 
+       current->stack_start = current->mm->start_stack;
+
        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
index 4314f0d48d85668e0d0af943cdb2b7d8fde4863a..dc2ad6008b2d08a354ce23507c44febb121b6466 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -33,10 +33,10 @@ void dlm_del_ast(struct dlm_lkb *lkb)
        spin_unlock(&ast_queue_lock);
 }
 
-void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode)
+void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode)
 {
        if (lkb->lkb_flags & DLM_IFL_USER) {
-               dlm_user_add_ast(lkb, type, mode);
+               dlm_user_add_ast(lkb, type, bastmode);
                return;
        }
 
@@ -44,21 +44,10 @@ void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode)
        if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
                kref_get(&lkb->lkb_ref);
                list_add_tail(&lkb->lkb_astqueue, &ast_queue);
-               lkb->lkb_ast_first = type;
        }
-
-       /* sanity check, this should not happen */
-
-       if ((type == AST_COMP) && (lkb->lkb_ast_type & AST_COMP))
-               log_print("repeat cast %d castmode %d lock %x %s",
-                         mode, lkb->lkb_castmode,
-                         lkb->lkb_id, lkb->lkb_resource->res_name);
-
        lkb->lkb_ast_type |= type;
-       if (type == AST_BAST)
-               lkb->lkb_bastmode = mode;
-       else
-               lkb->lkb_castmode = mode;
+       if (bastmode)
+               lkb->lkb_bastmode = bastmode;
        spin_unlock(&ast_queue_lock);
 
        set_bit(WAKE_ASTS, &astd_wakeflags);
@@ -70,9 +59,9 @@ static void process_asts(void)
        struct dlm_ls *ls = NULL;
        struct dlm_rsb *r = NULL;
        struct dlm_lkb *lkb;
-       void (*castfn) (void *astparam);
-       void (*bastfn) (void *astparam, int mode);
-       int type, first, bastmode, castmode, do_bast, do_cast, last_castmode;
+       void (*cast) (void *astparam);
+       void (*bast) (void *astparam, int mode);
+       int type = 0, bastmode;
 
 repeat:
        spin_lock(&ast_queue_lock);
@@ -86,48 +75,17 @@ repeat:
                list_del(&lkb->lkb_astqueue);
                type = lkb->lkb_ast_type;
                lkb->lkb_ast_type = 0;
-               first = lkb->lkb_ast_first;
-               lkb->lkb_ast_first = 0;
                bastmode = lkb->lkb_bastmode;
-               castmode = lkb->lkb_castmode;
-               castfn = lkb->lkb_astfn;
-               bastfn = lkb->lkb_bastfn;
+
                spin_unlock(&ast_queue_lock);
+               cast = lkb->lkb_astfn;
+               bast = lkb->lkb_bastfn;
+
+               if ((type & AST_COMP) && cast)
+                       cast(lkb->lkb_astparam);
 
-               do_cast = (type & AST_COMP) && castfn;
-               do_bast = (type & AST_BAST) && bastfn;
-
-               /* Skip a bast if its blocking mode is compatible with the
-                  granted mode of the preceding cast. */
-
-               if (do_bast) {
-                       if (first == AST_COMP)
-                               last_castmode = castmode;
-                       else
-                               last_castmode = lkb->lkb_castmode_done;
-                       if (dlm_modes_compat(bastmode, last_castmode))
-                               do_bast = 0;
-               }
-
-               if (first == AST_COMP) {
-                       if (do_cast)
-                               castfn(lkb->lkb_astparam);
-                       if (do_bast)
-                               bastfn(lkb->lkb_astparam, bastmode);
-               } else if (first == AST_BAST) {
-                       if (do_bast)
-                               bastfn(lkb->lkb_astparam, bastmode);
-                       if (do_cast)
-                               castfn(lkb->lkb_astparam);
-               } else {
-                       log_error(ls, "bad ast_first %d ast_type %d",
-                                 first, type);
-               }
-
-               if (do_cast)
-                       lkb->lkb_castmode_done = castmode;
-               if (do_bast)
-                       lkb->lkb_bastmode_done = bastmode;
+               if ((type & AST_BAST) && bast)
+                       bast(lkb->lkb_astparam, bastmode);
 
                /* this removes the reference added by dlm_add_ast
                   and may result in the lkb being freed */
index bcb1aaba519d97c031b06b356e7aa9051a7ecb76..1b5fc5f428fdd2d0dc01050be27de84c65a2b659 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
 *******************************************************************************
 **
-**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -13,7 +13,7 @@
 #ifndef __ASTD_DOT_H__
 #define __ASTD_DOT_H__
 
-void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode);
+void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode);
 void dlm_del_ast(struct dlm_lkb *lkb);
 
 void dlm_astd_wake(void);
index 0df243850818340c4423fa56ae9264151285c0a1..fd9859f92fad3e05ed2ab372d226d6e61fdedfb8 100644 (file)
@@ -410,10 +410,10 @@ static struct config_group *make_cluster(struct config_group *g,
        struct dlm_comms *cms = NULL;
        void *gps = NULL;
 
-       cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
-       gps = kcalloc(3, sizeof(struct config_group *), GFP_NOFS);
-       sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
-       cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
+       cl = kzalloc(sizeof(struct dlm_cluster), GFP_KERNEL);
+       gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
+       sps = kzalloc(sizeof(struct dlm_spaces), GFP_KERNEL);
+       cms = kzalloc(sizeof(struct dlm_comms), GFP_KERNEL);
 
        if (!cl || !gps || !sps || !cms)
                goto fail;
@@ -482,9 +482,9 @@ static struct config_group *make_space(struct config_group *g, const char *name)
        struct dlm_nodes *nds = NULL;
        void *gps = NULL;
 
-       sp = kzalloc(sizeof(struct dlm_space), GFP_NOFS);
-       gps = kcalloc(2, sizeof(struct config_group *), GFP_NOFS);
-       nds = kzalloc(sizeof(struct dlm_nodes), GFP_NOFS);
+       sp = kzalloc(sizeof(struct dlm_space), GFP_KERNEL);
+       gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL);
+       nds = kzalloc(sizeof(struct dlm_nodes), GFP_KERNEL);
 
        if (!sp || !gps || !nds)
                goto fail;
@@ -536,7 +536,7 @@ static struct config_item *make_comm(struct config_group *g, const char *name)
 {
        struct dlm_comm *cm;
 
-       cm = kzalloc(sizeof(struct dlm_comm), GFP_NOFS);
+       cm = kzalloc(sizeof(struct dlm_comm), GFP_KERNEL);
        if (!cm)
                return ERR_PTR(-ENOMEM);
 
@@ -569,7 +569,7 @@ static struct config_item *make_node(struct config_group *g, const char *name)
        struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
        struct dlm_node *nd;
 
-       nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS);
+       nd = kzalloc(sizeof(struct dlm_node), GFP_KERNEL);
        if (!nd)
                return ERR_PTR(-ENOMEM);
 
@@ -705,7 +705,7 @@ static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
        if (cm->addr_count >= DLM_MAX_ADDR_COUNT)
                return -ENOSPC;
 
-       addr = kzalloc(sizeof(*addr), GFP_NOFS);
+       addr = kzalloc(sizeof(*addr), GFP_KERNEL);
        if (!addr)
                return -ENOMEM;
 
@@ -868,7 +868,7 @@ int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
 
        ids_count = sp->members_count;
 
-       ids = kcalloc(ids_count, sizeof(int), GFP_NOFS);
+       ids = kcalloc(ids_count, sizeof(int), GFP_KERNEL);
        if (!ids) {
                rv = -ENOMEM;
                goto out;
@@ -886,7 +886,7 @@ int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
        if (!new_count)
                goto out_ids;
 
-       new = kcalloc(new_count, sizeof(int), GFP_NOFS);
+       new = kcalloc(new_count, sizeof(int), GFP_KERNEL);
        if (!new) {
                kfree(ids);
                rv = -ENOMEM;
index 375a2359b3bfa526fd6c4950a024dfd83ce98d56..1c8bb8c3a82efd92519c16e0a01088e9bf9bf5bf 100644 (file)
@@ -404,7 +404,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
        if (bucket >= ls->ls_rsbtbl_size)
                return NULL;
 
-       ri = kzalloc(sizeof(struct rsbtbl_iter), GFP_NOFS);
+       ri = kzalloc(sizeof(struct rsbtbl_iter), GFP_KERNEL);
        if (!ri)
                return NULL;
        if (n == 0)
index 7b84c1dbc82ebeaec0a42846537524c73d1db13f..c4dfa1dcc86f32d43469f5122c22ade5516544a5 100644 (file)
@@ -49,7 +49,8 @@ static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
        spin_unlock(&ls->ls_recover_list_lock);
 
        if (!found)
-               de = kzalloc(sizeof(struct dlm_direntry) + len, GFP_NOFS);
+               de = kzalloc(sizeof(struct dlm_direntry) + len,
+                            ls->ls_allocation);
        return de;
 }
 
@@ -211,7 +212,7 @@ int dlm_recover_directory(struct dlm_ls *ls)
 
        dlm_dir_clear(ls);
 
-       last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_NOFS);
+       last_name = kmalloc(DLM_RESNAME_MAXLEN, ls->ls_allocation);
        if (!last_name)
                goto out;
 
@@ -322,7 +323,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
        if (namelen > DLM_RESNAME_MAXLEN)
                return -EINVAL;
 
-       de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_NOFS);
+       de = kzalloc(sizeof(struct dlm_direntry) + namelen, ls->ls_allocation);
        if (!de)
                return -ENOMEM;
 
index f632b58cd2221c71fd7c6454cf70f9d17dfa7e1e..d01ca0a711db15de2f1d57854b9acb875e9c63e0 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -232,17 +232,11 @@ struct dlm_lkb {
        int8_t                  lkb_status;     /* granted, waiting, convert */
        int8_t                  lkb_rqmode;     /* requested lock mode */
        int8_t                  lkb_grmode;     /* granted lock mode */
+       int8_t                  lkb_bastmode;   /* requested mode */
        int8_t                  lkb_highbast;   /* highest mode bast sent for */
-
        int8_t                  lkb_wait_type;  /* type of reply waiting for */
        int8_t                  lkb_wait_count;
        int8_t                  lkb_ast_type;   /* type of ast queued for */
-       int8_t                  lkb_ast_first;  /* type of first ast queued */
-
-       int8_t                  lkb_bastmode;   /* req mode of queued bast */
-       int8_t                  lkb_castmode;   /* gr mode of queued cast */
-       int8_t                  lkb_bastmode_done; /* last delivered bastmode */
-       int8_t                  lkb_castmode_done; /* last delivered castmode */
 
        struct list_head        lkb_idtbl_list; /* lockspace lkbtbl */
        struct list_head        lkb_statequeue; /* rsb g/c/w list */
@@ -479,6 +473,7 @@ struct dlm_ls {
        int                     ls_low_nodeid;
        int                     ls_total_weight;
        int                     *ls_node_array;
+       gfp_t                   ls_allocation;
 
        struct dlm_rsb          ls_stub_rsb;    /* for returning errors */
        struct dlm_lkb          ls_stub_lkb;    /* for returning errors */
index d0e43a3da887b8d9f291810c4b69820bb76b63b7..eb507c453c5ff7ca221f933619a085461bec3300 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
 *******************************************************************************
 **
-**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -307,7 +307,7 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
        lkb->lkb_lksb->sb_status = rv;
        lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
 
-       dlm_add_ast(lkb, AST_COMP, lkb->lkb_grmode);
+       dlm_add_ast(lkb, AST_COMP, 0);
 }
 
 static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -2280,30 +2280,20 @@ static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
        if (can_be_queued(lkb)) {
                error = -EINPROGRESS;
                add_lkb(r, lkb, DLM_LKSTS_WAITING);
+               send_blocking_asts(r, lkb);
                add_timeout(lkb);
                goto out;
        }
 
        error = -EAGAIN;
+       if (force_blocking_asts(lkb))
+               send_blocking_asts_all(r, lkb);
        queue_cast(r, lkb, -EAGAIN);
+
  out:
        return error;
 }
 
-static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
-                              int error)
-{
-       switch (error) {
-       case -EAGAIN:
-               if (force_blocking_asts(lkb))
-                       send_blocking_asts_all(r, lkb);
-               break;
-       case -EINPROGRESS:
-               send_blocking_asts(r, lkb);
-               break;
-       }
-}
-
 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
 {
        int error = 0;
@@ -2314,6 +2304,7 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
        if (can_be_granted(r, lkb, 1, &deadlk)) {
                grant_lock(r, lkb);
                queue_cast(r, lkb, 0);
+               grant_pending_locks(r);
                goto out;
        }
 
@@ -2343,6 +2334,7 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
                if (_can_be_granted(r, lkb, 1)) {
                        grant_lock(r, lkb);
                        queue_cast(r, lkb, 0);
+                       grant_pending_locks(r);
                        goto out;
                }
                /* else fall through and move to convert queue */
@@ -2352,45 +2344,26 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
                error = -EINPROGRESS;
                del_lkb(r, lkb);
                add_lkb(r, lkb, DLM_LKSTS_CONVERT);
+               send_blocking_asts(r, lkb);
                add_timeout(lkb);
                goto out;
        }
 
        error = -EAGAIN;
+       if (force_blocking_asts(lkb))
+               send_blocking_asts_all(r, lkb);
        queue_cast(r, lkb, -EAGAIN);
+
  out:
        return error;
 }
 
-static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
-                              int error)
-{
-       switch (error) {
-       case 0:
-               grant_pending_locks(r);
-               /* grant_pending_locks also sends basts */
-               break;
-       case -EAGAIN:
-               if (force_blocking_asts(lkb))
-                       send_blocking_asts_all(r, lkb);
-               break;
-       case -EINPROGRESS:
-               send_blocking_asts(r, lkb);
-               break;
-       }
-}
-
 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
 {
        remove_lock(r, lkb);
        queue_cast(r, lkb, -DLM_EUNLOCK);
-       return -DLM_EUNLOCK;
-}
-
-static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
-                             int error)
-{
        grant_pending_locks(r);
+       return -DLM_EUNLOCK;
 }
 
 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
@@ -2402,18 +2375,12 @@ static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
        error = revert_lock(r, lkb);
        if (error) {
                queue_cast(r, lkb, -DLM_ECANCEL);
+               grant_pending_locks(r);
                return -DLM_ECANCEL;
        }
        return 0;
 }
 
-static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
-                             int error)
-{
-       if (error)
-               grant_pending_locks(r);
-}
-
 /*
  * Four stage 3 varieties:
  * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
@@ -2435,15 +2402,11 @@ static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
                goto out;
        }
 
-       if (is_remote(r)) {
+       if (is_remote(r))
                /* receive_request() calls do_request() on remote node */
                error = send_request(r, lkb);
-       } else {
+       else
                error = do_request(r, lkb);
-               /* for remote locks the request_reply is sent
-                  between do_request and do_request_effects */
-               do_request_effects(r, lkb, error);
-       }
  out:
        return error;
 }
@@ -2454,15 +2417,11 @@ static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
 {
        int error;
 
-       if (is_remote(r)) {
+       if (is_remote(r))
                /* receive_convert() calls do_convert() on remote node */
                error = send_convert(r, lkb);
-       } else {
+       else
                error = do_convert(r, lkb);
-               /* for remote locks the convert_reply is sent
-                  between do_convert and do_convert_effects */
-               do_convert_effects(r, lkb, error);
-       }
 
        return error;
 }
@@ -2473,15 +2432,11 @@ static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
 {
        int error;
 
-       if (is_remote(r)) {
+       if (is_remote(r))
                /* receive_unlock() calls do_unlock() on remote node */
                error = send_unlock(r, lkb);
-       } else {
+       else
                error = do_unlock(r, lkb);
-               /* for remote locks the unlock_reply is sent
-                  between do_unlock and do_unlock_effects */
-               do_unlock_effects(r, lkb, error);
-       }
 
        return error;
 }
@@ -2492,15 +2447,11 @@ static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
 {
        int error;
 
-       if (is_remote(r)) {
+       if (is_remote(r))
                /* receive_cancel() calls do_cancel() on remote node */
                error = send_cancel(r, lkb);
-       } else {
+       else
                error = do_cancel(r, lkb);
-               /* for remote locks the cancel_reply is sent
-                  between do_cancel and do_cancel_effects */
-               do_cancel_effects(r, lkb, error);
-       }
 
        return error;
 }
@@ -2738,7 +2689,7 @@ static int _create_message(struct dlm_ls *ls, int mb_len,
           pass into lowcomms_commit and a message buffer (mb) that we
           write our data into */
 
-       mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
+       mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, ls->ls_allocation, &mb);
        if (!mh)
                return -ENOBUFS;
 
@@ -3240,7 +3191,6 @@ static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
        attach_lkb(r, lkb);
        error = do_request(r, lkb);
        send_request_reply(r, lkb, error);
-       do_request_effects(r, lkb, error);
 
        unlock_rsb(r);
        put_rsb(r);
@@ -3276,19 +3226,15 @@ static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
                goto out;
 
        receive_flags(lkb, ms);
-
        error = receive_convert_args(ls, lkb, ms);
-       if (error) {
-               send_convert_reply(r, lkb, error);
-               goto out;
-       }
-
+       if (error)
+               goto out_reply;
        reply = !down_conversion(lkb);
 
        error = do_convert(r, lkb);
+ out_reply:
        if (reply)
                send_convert_reply(r, lkb, error);
-       do_convert_effects(r, lkb, error);
  out:
        unlock_rsb(r);
        put_rsb(r);
@@ -3320,16 +3266,13 @@ static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
                goto out;
 
        receive_flags(lkb, ms);
-
        error = receive_unlock_args(ls, lkb, ms);
-       if (error) {
-               send_unlock_reply(r, lkb, error);
-               goto out;
-       }
+       if (error)
+               goto out_reply;
 
        error = do_unlock(r, lkb);
+ out_reply:
        send_unlock_reply(r, lkb, error);
-       do_unlock_effects(r, lkb, error);
  out:
        unlock_rsb(r);
        put_rsb(r);
@@ -3364,7 +3307,6 @@ static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
 
        error = do_cancel(r, lkb);
        send_cancel_reply(r, lkb, error);
-       do_cancel_effects(r, lkb, error);
  out:
        unlock_rsb(r);
        put_rsb(r);
@@ -4570,7 +4512,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
        }
 
        if (flags & DLM_LKF_VALBLK) {
-               ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
+               ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
                if (!ua->lksb.sb_lvbptr) {
                        kfree(ua);
                        __put_lkb(ls, lkb);
@@ -4640,7 +4582,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
        ua = lkb->lkb_ua;
 
        if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
-               ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
+               ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
                if (!ua->lksb.sb_lvbptr) {
                        error = -ENOMEM;
                        goto out_put;
index c010ecfc0d295525de2455b02bf8a94dd8dbedea..d489fcc86713de1d0484902d6e7f911b57377cd0 100644 (file)
@@ -430,7 +430,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
 
        error = -ENOMEM;
 
-       ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
+       ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
        if (!ls)
                goto out;
        memcpy(ls->ls_name, name, namelen);
@@ -443,6 +443,11 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
        if (flags & DLM_LSFL_TIMEWARN)
                set_bit(LSFL_TIMEWARN, &ls->ls_flags);
 
+       if (flags & DLM_LSFL_FS)
+               ls->ls_allocation = GFP_NOFS;
+       else
+               ls->ls_allocation = GFP_KERNEL;
+
        /* ls_exflags are forced to match among nodes, and we don't
           need to require all nodes to have some flags set */
        ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
@@ -451,7 +456,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
        size = dlm_config.ci_rsbtbl_size;
        ls->ls_rsbtbl_size = size;
 
-       ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_NOFS);
+       ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
        if (!ls->ls_rsbtbl)
                goto out_lsfree;
        for (i = 0; i < size; i++) {
@@ -463,7 +468,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
        size = dlm_config.ci_lkbtbl_size;
        ls->ls_lkbtbl_size = size;
 
-       ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_NOFS);
+       ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
        if (!ls->ls_lkbtbl)
                goto out_rsbfree;
        for (i = 0; i < size; i++) {
@@ -475,7 +480,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
        size = dlm_config.ci_dirtbl_size;
        ls->ls_dirtbl_size = size;
 
-       ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_NOFS);
+       ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
        if (!ls->ls_dirtbl)
                goto out_lkbfree;
        for (i = 0; i < size; i++) {
@@ -522,7 +527,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
        mutex_init(&ls->ls_requestqueue_mutex);
        mutex_init(&ls->ls_clear_proc_locks);
 
-       ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
+       ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
        if (!ls->ls_recover_buf)
                goto out_dirfree;
 
index 52cab160893ce0d095e771dd1abe72a4b170162f..70736eb4b51652e9ad64078180ffc8c154265df4 100644 (file)
@@ -1060,7 +1060,7 @@ static void init_local(void)
                if (dlm_our_addr(&sas, i))
                        break;
 
-               addr = kmalloc(sizeof(*addr), GFP_NOFS);
+               addr = kmalloc(sizeof(*addr), GFP_KERNEL);
                if (!addr)
                        break;
                memcpy(addr, &sas, sizeof(*addr));
@@ -1099,7 +1099,7 @@ static int sctp_listen_for_all(void)
        struct sockaddr_storage localaddr;
        struct sctp_event_subscribe subscribe;
        int result = -EINVAL, num = 1, i, addr_len;
-       struct connection *con = nodeid2con(0, GFP_NOFS);
+       struct connection *con = nodeid2con(0, GFP_KERNEL);
        int bufsize = NEEDED_RMEM;
 
        if (!con)
@@ -1171,7 +1171,7 @@ out:
 static int tcp_listen_for_all(void)
 {
        struct socket *sock = NULL;
-       struct connection *con = nodeid2con(0, GFP_NOFS);
+       struct connection *con = nodeid2con(0, GFP_KERNEL);
        int result = -EINVAL;
 
        if (!con)
index 84f70bfb0baf4dfe4e19fee23238783965a637cc..b128775913b29d6b43aeeec34910af6f9583ce9a 100644 (file)
@@ -48,7 +48,7 @@ static int dlm_add_member(struct dlm_ls *ls, int nodeid)
        struct dlm_member *memb;
        int w, error;
 
-       memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
+       memb = kzalloc(sizeof(struct dlm_member), ls->ls_allocation);
        if (!memb)
                return -ENOMEM;
 
@@ -143,7 +143,7 @@ static void make_member_array(struct dlm_ls *ls)
 
        ls->ls_total_weight = total;
 
-       array = kmalloc(sizeof(int) * total, GFP_NOFS);
+       array = kmalloc(sizeof(int) * total, ls->ls_allocation);
        if (!array)
                return;
 
@@ -226,7 +226,7 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
                        continue;
                log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]);
 
-               memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
+               memb = kzalloc(sizeof(struct dlm_member), ls->ls_allocation);
                if (!memb)
                        return -ENOMEM;
                memb->nodeid = rv->new[i];
@@ -341,7 +341,7 @@ int dlm_ls_start(struct dlm_ls *ls)
        int *ids = NULL, *new = NULL;
        int error, ids_count = 0, new_count = 0;
 
-       rv = kzalloc(sizeof(struct dlm_recover), GFP_NOFS);
+       rv = kzalloc(sizeof(struct dlm_recover), ls->ls_allocation);
        if (!rv)
                return -ENOMEM;
 
index 8e0d00db004f8b8fd71cc75b3958c215beb836bd..c1775b84ebab2e552e4a4a04e5c8491f1fe99e3a 100644 (file)
@@ -39,7 +39,7 @@ char *dlm_allocate_lvb(struct dlm_ls *ls)
 {
        char *p;
 
-       p = kzalloc(ls->ls_lvblen, GFP_NOFS);
+       p = kzalloc(ls->ls_lvblen, ls->ls_allocation);
        return p;
 }
 
@@ -57,7 +57,7 @@ struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls, int namelen)
 
        DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,);
 
-       r = kzalloc(sizeof(*r) + namelen, GFP_NOFS);
+       r = kzalloc(sizeof(*r) + namelen, ls->ls_allocation);
        return r;
 }
 
@@ -72,7 +72,7 @@ struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
 {
        struct dlm_lkb *lkb;
 
-       lkb = kmem_cache_zalloc(lkb_cache, GFP_NOFS);
+       lkb = kmem_cache_zalloc(lkb_cache, ls->ls_allocation);
        return lkb;
 }
 
index 052095cd592f3787ed20f525e1c7c08e0e30378b..55ea369f43a9f0d153ca769074b89e71d52082cf 100644 (file)
@@ -26,7 +26,7 @@ static int prepare_data(u8 cmd, struct sk_buff **skbp, size_t size)
        struct sk_buff *skb;
        void *data;
 
-       skb = genlmsg_new(size, GFP_NOFS);
+       skb = genlmsg_new(size, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
 
index 2863deb178e2668566607f1548fc6f454ba18d97..16f682e26c07e49493e8a667a6b36cf0cf9f6bb6 100644 (file)
@@ -82,7 +82,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
        if (!ls)
                return -EINVAL;
 
-       xop = kzalloc(sizeof(*xop), GFP_NOFS);
+       xop = kzalloc(sizeof(*xop), GFP_KERNEL);
        if (!xop) {
                rv = -ENOMEM;
                goto out;
@@ -211,7 +211,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
        if (!ls)
                return -EINVAL;
 
-       op = kzalloc(sizeof(*op), GFP_NOFS);
+       op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                rv = -ENOMEM;
                goto out;
@@ -266,7 +266,7 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
        if (!ls)
                return -EINVAL;
 
-       op = kzalloc(sizeof(*op), GFP_NOFS);
+       op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                rv = -ENOMEM;
                goto out;
index 3c83a49a48a3c6d464a779cf3929c6f670073d08..67522c268c14972d20fb9825de128ff66aa2b2a9 100644 (file)
@@ -38,7 +38,7 @@ static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
        char *mb;
        int mb_len = sizeof(struct dlm_rcom) + len;
 
-       mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
+       mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, ls->ls_allocation, &mb);
        if (!mh) {
                log_print("create_rcom to %d type %d len %d ENOBUFS",
                          to_nodeid, type, len);
index a44fa22890e1dd06797ae5e43e77a86c0cf4ca63..7a2307c08911101e9f82b99e0c6bad22c03a6e24 100644 (file)
@@ -35,7 +35,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
        struct rq_entry *e;
        int length = ms->m_header.h_length - sizeof(struct dlm_message);
 
-       e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
+       e = kmalloc(sizeof(struct rq_entry) + length, ls->ls_allocation);
        if (!e) {
                log_print("dlm_add_requestqueue: out of memory len %d", length);
                return;
index a4bfd31ac45bec4ad5e1010e0ad6a386a338f18b..ebce994ab0b717253a0bfa98230c80cefde397d2 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2006-2009 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -173,7 +173,7 @@ static int lkb_is_endoflife(struct dlm_lkb *lkb, int sb_status, int type)
 /* we could possibly check if the cancel of an orphan has resulted in the lkb
    being removed and then remove that lkb from the orphans list and free it */
 
-void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode)
+void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode)
 {
        struct dlm_ls *ls;
        struct dlm_user_args *ua;
@@ -206,10 +206,8 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode)
 
        ast_type = lkb->lkb_ast_type;
        lkb->lkb_ast_type |= type;
-       if (type == AST_BAST)
-               lkb->lkb_bastmode = mode;
-       else
-               lkb->lkb_castmode = mode;
+       if (bastmode)
+               lkb->lkb_bastmode = bastmode;
 
        if (!ast_type) {
                kref_get(&lkb->lkb_ref);
@@ -269,7 +267,7 @@ static int device_user_lock(struct dlm_user_proc *proc,
                goto out;
        }
 
-       ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
+       ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
        if (!ua)
                goto out;
        ua->proc = proc;
@@ -309,7 +307,7 @@ static int device_user_unlock(struct dlm_user_proc *proc,
        if (!ls)
                return -ENOENT;
 
-       ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
+       ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
        if (!ua)
                goto out;
        ua->proc = proc;
@@ -354,7 +352,7 @@ static int dlm_device_register(struct dlm_ls *ls, char *name)
 
        error = -ENOMEM;
        len = strlen(name) + strlen(name_prefix) + 2;
-       ls->ls_device.name = kzalloc(len, GFP_NOFS);
+       ls->ls_device.name = kzalloc(len, GFP_KERNEL);
        if (!ls->ls_device.name)
                goto fail;
 
@@ -522,7 +520,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
 #endif
                return -EINVAL;
 
-       kbuf = kzalloc(count + 1, GFP_NOFS);
+       kbuf = kzalloc(count + 1, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;
 
@@ -548,7 +546,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
 
                /* add 1 after namelen so that the name string is terminated */
                kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
-                              GFP_NOFS);
+                              GFP_KERNEL);
                if (!kbuf) {
                        kfree(k32buf);
                        return -ENOMEM;
@@ -650,7 +648,7 @@ static int device_open(struct inode *inode, struct file *file)
        if (!ls)
                return -ENOENT;
 
-       proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
+       proc = kzalloc(sizeof(struct dlm_user_proc), GFP_KERNEL);
        if (!proc) {
                dlm_put_lockspace(ls);
                return -ENOMEM;
index f196091dd7ff8d31c687526d973c404d30098f00..1c96864922869b396bcbf2cab5a70c2eb32c064b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2006-2008 Red Hat, Inc.  All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -9,7 +9,7 @@
 #ifndef __USER_DOT_H__
 #define __USER_DOT_H__
 
-void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode);
+void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode);
 int dlm_user_init(void);
 void dlm_user_exit(void);
 int dlm_device_deregister(struct dlm_ls *ls);
index 4e25328986ac7b6718df18eace61eea75693a190..1744f17ce96ed6954955b1fdd1c0504ab04f7c45 100644 (file)
@@ -198,7 +198,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
                               "the persistent file for the dentry with name "
                               "[%s]; rc = [%d]\n", __func__,
                               ecryptfs_dentry->d_name.name, rc);
-                       goto out_free;
+                       goto out;
                }
        }
        if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
@@ -206,7 +206,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
                rc = -EPERM;
                printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
                       "file must hence be opened RO\n", __func__);
-               goto out_free;
+               goto out;
        }
        ecryptfs_set_file_lower(
                file, ecryptfs_inode_to_private(inode)->lower_file);
@@ -293,40 +293,12 @@ static int ecryptfs_fasync(int fd, struct file *file, int flag)
        return rc;
 }
 
-static long
-ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct file *lower_file = NULL;
-       long rc = -ENOTTY;
-
-       if (ecryptfs_file_to_private(file))
-               lower_file = ecryptfs_file_to_lower(file);
-       if (lower_file && lower_file->f_op && lower_file->f_op->unlocked_ioctl)
-               rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
-       return rc;
-}
-
-#ifdef CONFIG_COMPAT
-static long
-ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct file *lower_file = NULL;
-       long rc = -ENOIOCTLCMD;
-
-       if (ecryptfs_file_to_private(file))
-               lower_file = ecryptfs_file_to_lower(file);
-       if (lower_file && lower_file->f_op && lower_file->f_op->compat_ioctl)
-               rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
-       return rc;
-}
-#endif
+static int ecryptfs_ioctl(struct inode *inode, struct file *file,
+                         unsigned int cmd, unsigned long arg);
 
 const struct file_operations ecryptfs_dir_fops = {
        .readdir = ecryptfs_readdir,
-       .unlocked_ioctl = ecryptfs_unlocked_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = ecryptfs_compat_ioctl,
-#endif
+       .ioctl = ecryptfs_ioctl,
        .mmap = generic_file_mmap,
        .open = ecryptfs_open,
        .flush = ecryptfs_flush,
@@ -343,10 +315,7 @@ const struct file_operations ecryptfs_main_fops = {
        .write = do_sync_write,
        .aio_write = generic_file_aio_write,
        .readdir = ecryptfs_readdir,
-       .unlocked_ioctl = ecryptfs_unlocked_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = ecryptfs_compat_ioctl,
-#endif
+       .ioctl = ecryptfs_ioctl,
        .mmap = generic_file_mmap,
        .open = ecryptfs_open,
        .flush = ecryptfs_flush,
@@ -355,3 +324,20 @@ const struct file_operations ecryptfs_main_fops = {
        .fasync = ecryptfs_fasync,
        .splice_read = generic_file_splice_read,
 };
+
+static int
+ecryptfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+              unsigned long arg)
+{
+       int rc = 0;
+       struct file *lower_file = NULL;
+
+       if (ecryptfs_file_to_private(file))
+               lower_file = ecryptfs_file_to_lower(file);
+       if (lower_file && lower_file->f_op && lower_file->f_op->ioctl)
+               rc = lower_file->f_op->ioctl(ecryptfs_inode_to_lower(inode),
+                                            lower_file, cmd, arg);
+       else
+               rc = -ENOTTY;
+       return rc;
+}
index b582f09a90122cd8f9c966703f33d63f2ccde3a4..728f07ebc593db3d6287339cdfa77d5d3261bf8e 100644 (file)
@@ -69,19 +69,15 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
        struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
        struct dentry *dentry_save;
        struct vfsmount *vfsmount_save;
-       unsigned int flags_save;
        int rc;
 
        dentry_save = nd->path.dentry;
        vfsmount_save = nd->path.mnt;
-       flags_save = nd->flags;
        nd->path.dentry = lower_dentry;
        nd->path.mnt = lower_mnt;
-       nd->flags &= ~LOOKUP_OPEN;
        rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
        nd->path.dentry = dentry_save;
        nd->path.mnt = vfsmount_save;
-       nd->flags = flags_save;
        return rc;
 }
 
@@ -276,7 +272,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
                printk(KERN_ERR "%s: Out of memory whilst attempting "
                       "to allocate ecryptfs_dentry_info struct\n",
                        __func__);
-               goto out_put;
+               goto out_dput;
        }
        ecryptfs_set_dentry_lower(ecryptfs_dentry, lower_dentry);
        ecryptfs_set_dentry_lower_mnt(ecryptfs_dentry, lower_mnt);
@@ -349,9 +345,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
 out_free_kmem:
        kmem_cache_free(ecryptfs_header_cache_2, page_virt);
        goto out;
-out_put:
+out_dput:
        dput(lower_dentry);
-       mntput(lower_mnt);
        d_drop(ecryptfs_dentry);
 out:
        return rc;
@@ -643,17 +638,38 @@ out_lock:
        return rc;
 }
 
-static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
-                                  size_t *bufsiz)
+static int
+ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
 {
-       struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
        char *lower_buf;
-       size_t lower_bufsiz = PATH_MAX;
+       size_t lower_bufsiz;
+       struct dentry *lower_dentry;
+       struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+       char *plaintext_name;
+       size_t plaintext_name_size;
        mm_segment_t old_fs;
        int rc;
 
+       lower_dentry = ecryptfs_dentry_to_lower(dentry);
+       if (!lower_dentry->d_inode->i_op->readlink) {
+               rc = -EINVAL;
+               goto out;
+       }
+       mount_crypt_stat = &ecryptfs_superblock_to_private(
+                                               dentry->d_sb)->mount_crypt_stat;
+       /*
+        * If the lower filename is encrypted, it will result in a significantly
+        * longer name.  If needed, truncate the name after decode and decrypt.
+        */
+       if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
+               lower_bufsiz = PATH_MAX;
+       else
+               lower_bufsiz = bufsiz;
+       /* Released in this function */
        lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
-       if (!lower_buf) {
+       if (lower_buf == NULL) {
+               printk(KERN_ERR "%s: Out of memory whilst attempting to "
+                      "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
                rc = -ENOMEM;
                goto out;
        }
@@ -663,31 +679,29 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
                                                   (char __user *)lower_buf,
                                                   lower_bufsiz);
        set_fs(old_fs);
-       if (rc < 0)
-               goto out;
-       lower_bufsiz = rc;
-       rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
-                                                 lower_buf, lower_bufsiz);
-out:
+       if (rc >= 0) {
+               rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name,
+                                                         &plaintext_name_size,
+                                                         dentry, lower_buf,
+                                                         rc);
+               if (rc) {
+                       printk(KERN_ERR "%s: Error attempting to decode and "
+                              "decrypt filename; rc = [%d]\n", __func__,
+                               rc);
+                       goto out_free_lower_buf;
+               }
+               /* Check for bufsiz <= 0 done in sys_readlinkat() */
+               rc = copy_to_user(buf, plaintext_name,
+                                 min((size_t) bufsiz, plaintext_name_size));
+               if (rc)
+                       rc = -EFAULT;
+               else
+                       rc = plaintext_name_size;
+               kfree(plaintext_name);
+               fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
+       }
+out_free_lower_buf:
        kfree(lower_buf);
-       return rc;
-}
-
-static int
-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
-{
-       char *kbuf;
-       size_t kbufsiz, copied;
-       int rc;
-
-       rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
-       if (rc)
-               goto out;
-       copied = min_t(size_t, bufsiz, kbufsiz);
-       rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
-       kfree(kbuf);
-       fsstack_copy_attr_atime(dentry->d_inode,
-                               ecryptfs_dentry_to_lower(dentry)->d_inode);
 out:
        return rc;
 }
@@ -957,28 +971,6 @@ out:
        return rc;
 }
 
-int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
-                         struct kstat *stat)
-{
-       struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
-       int rc = 0;
-
-       mount_crypt_stat = &ecryptfs_superblock_to_private(
-                                               dentry->d_sb)->mount_crypt_stat;
-       generic_fillattr(dentry->d_inode, stat);
-       if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
-               char *target;
-               size_t targetsiz;
-
-               rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
-               if (!rc) {
-                       kfree(target);
-                       stat->size = targetsiz;
-               }
-       }
-       return rc;
-}
-
 int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
                     struct kstat *stat)
 {
@@ -1003,7 +995,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
 
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
        if (!lower_dentry->d_inode->i_op->setxattr) {
-               rc = -EOPNOTSUPP;
+               rc = -ENOSYS;
                goto out;
        }
        mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1021,7 +1013,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
        int rc = 0;
 
        if (!lower_dentry->d_inode->i_op->getxattr) {
-               rc = -EOPNOTSUPP;
+               rc = -ENOSYS;
                goto out;
        }
        mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1048,7 +1040,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
 
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
        if (!lower_dentry->d_inode->i_op->listxattr) {
-               rc = -EOPNOTSUPP;
+               rc = -ENOSYS;
                goto out;
        }
        mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1065,7 +1057,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
 
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
        if (!lower_dentry->d_inode->i_op->removexattr) {
-               rc = -EOPNOTSUPP;
+               rc = -ENOSYS;
                goto out;
        }
        mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1096,7 +1088,6 @@ const struct inode_operations ecryptfs_symlink_iops = {
        .put_link = ecryptfs_put_link,
        .permission = ecryptfs_permission,
        .setattr = ecryptfs_setattr,
-       .getattr = ecryptfs_getattr_link,
        .setxattr = ecryptfs_setxattr,
        .getxattr = ecryptfs_getxattr,
        .listxattr = ecryptfs_listxattr,
index 3dfe7ce86b1b4b61f3dcf161ea8dd1d7fb96f307..f1c17e87c5fbc852443f01da834f256f3c33262b 100644 (file)
@@ -30,9 +30,9 @@ static struct mutex ecryptfs_msg_ctx_lists_mux;
 
 static struct hlist_head *ecryptfs_daemon_hash;
 struct mutex ecryptfs_daemon_hash_mux;
-static int ecryptfs_hash_bits;
+static int ecryptfs_hash_buckets;
 #define ecryptfs_uid_hash(uid) \
-        hash_long((unsigned long)uid, ecryptfs_hash_bits)
+        hash_long((unsigned long)uid, ecryptfs_hash_buckets)
 
 static u32 ecryptfs_msg_counter;
 static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
@@ -485,19 +485,18 @@ int ecryptfs_init_messaging(void)
        }
        mutex_init(&ecryptfs_daemon_hash_mux);
        mutex_lock(&ecryptfs_daemon_hash_mux);
-       ecryptfs_hash_bits = 1;
-       while (ecryptfs_number_of_users >> ecryptfs_hash_bits)
-               ecryptfs_hash_bits++;
+       ecryptfs_hash_buckets = 1;
+       while (ecryptfs_number_of_users >> ecryptfs_hash_buckets)
+               ecryptfs_hash_buckets++;
        ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
-                                       * (1 << ecryptfs_hash_bits)),
-                                      GFP_KERNEL);
+                                       * ecryptfs_hash_buckets), GFP_KERNEL);
        if (!ecryptfs_daemon_hash) {
                rc = -ENOMEM;
                printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
                mutex_unlock(&ecryptfs_daemon_hash_mux);
                goto out;
        }
-       for (i = 0; i < (1 << ecryptfs_hash_bits); i++)
+       for (i = 0; i < ecryptfs_hash_buckets; i++)
                INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
        mutex_unlock(&ecryptfs_daemon_hash_mux);
        ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
@@ -554,7 +553,7 @@ void ecryptfs_release_messaging(void)
                int i;
 
                mutex_lock(&ecryptfs_daemon_hash_mux);
-               for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
+               for (i = 0; i < ecryptfs_hash_buckets; i++) {
                        int rc;
 
                        hlist_for_each_entry(daemon, elem,
index 1a037f77aa52c1a0abcccfcfd24dcb9cceb63bff..b15a43a80ab78cc7dbb9e05dedef0dbb0dd0e6ee 100644 (file)
@@ -85,6 +85,7 @@ static void ecryptfs_destroy_inode(struct inode *inode)
                if (lower_dentry->d_inode) {
                        fput(inode_info->lower_file);
                        inode_info->lower_file = NULL;
+                       d_drop(lower_dentry);
                }
        }
        ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
index a0410eb44dfa8cf58f10617fa88859f04905f62d..da36c206f0f1ee86539a23608448028d8d095c78 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -376,9 +376,6 @@ static int count(char __user * __user * argv, int max)
                        argv++;
                        if (i++ >= max)
                                return -E2BIG;
-
-                       if (fatal_signal_pending(current))
-                               return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
@@ -422,12 +419,6 @@ static int copy_strings(int argc, char __user * __user * argv,
                while (len > 0) {
                        int offset, bytes_to_copy;
 
-                       if (fatal_signal_pending(current)) {
-                               ret = -ERESTARTNOHAND;
-                               goto out;
-                       }
-                       cond_resched();
-
                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;
@@ -603,11 +594,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
 #else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);
-
-       if (unlikely(stack_top < mmap_min_addr) ||
-           unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
-               return -ENOMEM;
-
        stack_shift = vma->vm_end - stack_top;
 
        bprm->p -= stack_shift;
@@ -652,6 +638,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
         * will align it up.
         */
        rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
+       rlim_stack = min(rlim_stack, stack_size);
 #ifdef CONFIG_STACK_GROWSUP
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_start + rlim_stack;
@@ -1393,6 +1380,8 @@ int do_execve(char * filename,
        if (retval < 0)
                goto out;
 
+       current->stack_start = current->mm->start_stack;
+
        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
@@ -1925,9 +1914,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
        /*
         * Dont allow local users get cute and trick others to coredump
         * into their pre-created files:
-        * Note, this is not relevant for pipes
         */
-       if (!ispipe && (inode->i_uid != current_fsuid()))
+       if (inode->i_uid != current_fsuid())
                goto close_fail;
        if (!file->f_op)
                goto close_fail;
index d91e9d829bc1bcc52cce4c4965d375a3eb7d356f..4cfab1cc75c03a0bb7654e5c8150c2009fe4aa11 100644 (file)
@@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
        de->inode_no = cpu_to_le64(parent->i_ino);
        memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
        exofs_set_de_type(de, inode);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(page, KM_USER0);
        err = exofs_commit_chunk(page, 0, chunk_size);
 fail:
        page_cache_release(page);
index ca3068fd234606ab6ab2cd8bb8b178a23f022945..427496c4767cda4c542f43cb97b8a3089e77bcce 100644 (file)
@@ -2686,11 +2686,13 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
        buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
+       es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
        buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
        if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
                buf->f_bavail = 0;
        buf->f_files = le32_to_cpu(es->s_inodes_count);
        buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
+       es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
        buf->f_namelen = EXT3_NAME_LEN;
        fsid = le64_to_cpup((void *)es->s_uuid) ^
               le64_to_cpup((void *)es->s_uuid + sizeof(u64));
index 387d92d00b977cd271bdab9416323909908242e3..545e37c4b91eb6387ddc015e17e000ad1ccbaaa3 100644 (file)
@@ -960,10 +960,6 @@ ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
        if (error)
                goto cleanup;
 
-       error = ext3_journal_get_write_access(handle, is.iloc.bh);
-       if (error)
-               goto cleanup;
-
        if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
                struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc);
                memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
@@ -989,6 +985,9 @@ ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
                if (flags & XATTR_CREATE)
                        goto cleanup;
        }
+       error = ext3_journal_get_write_access(handle, is.iloc.bh);
+       if (error)
+               goto cleanup;
        if (!value) {
                if (!is.s.not_found)
                        error = ext3_xattr_ibody_set(handle, inode, &i, &is);
index e85b63c9fbcb6b49cba4c8c154a22f1f556ff128..f3032c919a22dbee67f4d4d2f1f17269e69bac30 100644 (file)
@@ -189,6 +189,9 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
  * when a file system is mounted (see ext4_fill_super).
  */
 
+
+#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
+
 /**
  * ext4_get_group_desc() -- load group descriptor from disk
  * @sb:                        super block
index aa6fb6b9d3ad777624b5b918467ea58939d50c72..9dc93168e2623ae09d26b1c7287f4fc30975e077 100644 (file)
@@ -84,11 +84,9 @@ int ext4_check_dir_entry(const char *function, struct inode *dir,
 
        if (error_msg != NULL)
                ext4_error(dir->i_sb, function,
-                       "bad entry in directory #%lu: %s - block=%llu"
-                       "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
-                       dir->i_ino, error_msg,
-                       (unsigned long long) bh->b_blocknr,
-                       (unsigned) (offset%bh->b_size), offset,
+                       "bad entry in directory #%lu: %s - "
+                       "offset=%u, inode=%u, rec_len=%d, name_len=%d",
+                       dir->i_ino, error_msg, offset,
                        le32_to_cpu(de->inode),
                        rlen, de->name_len);
        return error_msg == NULL ? 1 : 0;
@@ -111,7 +109,7 @@ static int ext4_readdir(struct file *filp,
 
        if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
                                    EXT4_FEATURE_COMPAT_DIR_INDEX) &&
-           ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
+           ((EXT4_I(inode)->i_flags & EXT4_INDEX_FL) ||
             ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
                err = ext4_dx_readdir(filp, dirent, filldir);
                if (err != ERR_BAD_DX_DIR) {
@@ -122,7 +120,7 @@ static int ext4_readdir(struct file *filp,
                 * We don't set the inode dirty flag since it's not
                 * critical that it get flushed back to the disk.
                 */
-               ext4_clear_inode_flag(filp->f_path.dentry->d_inode, EXT4_INODE_INDEX);
+               EXT4_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL;
        }
        stored = 0;
        offset = filp->f_pos & (sb->s_blocksize - 1);
index 0773352fd5125c8425c881ec7dd8af91367e84c7..d0a2afbab009de427b26b64176679ba58d12782a 100644 (file)
@@ -29,9 +29,6 @@
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
-#ifdef __KERNEL__
-#include <linux/compat.h>
-#endif
 
 /*
  * The fourth extended filesystem constants/structures
@@ -142,8 +139,8 @@ typedef struct ext4_io_end {
        struct inode            *inode;         /* file being written to */
        unsigned int            flag;           /* unwritten or not */
        int                     error;          /* I/O error code */
-       loff_t                  offset;         /* offset in the file */
-       ssize_t                 size;           /* size of the extent */
+       ext4_lblk_t             offset;         /* offset in the file */
+       size_t                  size;           /* size of the extent */
        struct work_struct      work;           /* data work queue */
 } ext4_io_end_t;
 
@@ -287,12 +284,10 @@ struct flex_groups {
 #define EXT4_TOPDIR_FL                 0x00020000 /* Top of directory hierarchies*/
 #define EXT4_HUGE_FILE_FL               0x00040000 /* Set to each huge file */
 #define EXT4_EXTENTS_FL                        0x00080000 /* Inode uses extents */
-#define EXT4_EA_INODE_FL               0x00200000 /* Inode used for large EA */
-#define EXT4_EOFBLOCKS_FL              0x00400000 /* Blocks allocated beyond EOF */
 #define EXT4_RESERVED_FL               0x80000000 /* reserved for ext4 lib */
 
-#define EXT4_FL_USER_VISIBLE           0x004BDFFF /* User visible flags */
-#define EXT4_FL_USER_MODIFIABLE                0x004B80FF /* User modifiable flags */
+#define EXT4_FL_USER_VISIBLE           0x000BDFFF /* User visible flags */
+#define EXT4_FL_USER_MODIFIABLE                0x000B80FF /* User modifiable flags */
 
 /* Flags that should be inherited by new inodes from their parent. */
 #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
@@ -319,81 +314,15 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
 }
 
 /*
- * Inode flags used for atomic set/get
- */
-enum {
-       EXT4_INODE_SECRM        = 0,    /* Secure deletion */
-       EXT4_INODE_UNRM         = 1,    /* Undelete */
-       EXT4_INODE_COMPR        = 2,    /* Compress file */
-       EXT4_INODE_SYNC         = 3,    /* Synchronous updates */
-       EXT4_INODE_IMMUTABLE    = 4,    /* Immutable file */
-       EXT4_INODE_APPEND       = 5,    /* writes to file may only append */
-       EXT4_INODE_NODUMP       = 6,    /* do not dump file */
-       EXT4_INODE_NOATIME      = 7,    /* do not update atime */
-/* Reserved for compression usage... */
-       EXT4_INODE_DIRTY        = 8,
-       EXT4_INODE_COMPRBLK     = 9,    /* One or more compressed clusters */
-       EXT4_INODE_NOCOMPR      = 10,   /* Don't compress */
-       EXT4_INODE_ECOMPR       = 11,   /* Compression error */
-/* End compression flags --- maybe not all used */
-       EXT4_INODE_INDEX        = 12,   /* hash-indexed directory */
-       EXT4_INODE_IMAGIC       = 13,   /* AFS directory */
-       EXT4_INODE_JOURNAL_DATA = 14,   /* file data should be journaled */
-       EXT4_INODE_NOTAIL       = 15,   /* file tail should not be merged */
-       EXT4_INODE_DIRSYNC      = 16,   /* dirsync behaviour (directories only) */
-       EXT4_INODE_TOPDIR       = 17,   /* Top of directory hierarchies*/
-       EXT4_INODE_HUGE_FILE    = 18,   /* Set to each huge file */
-       EXT4_INODE_EXTENTS      = 19,   /* Inode uses extents */
-       EXT4_INODE_EA_INODE     = 21,   /* Inode used for large EA */
-       EXT4_INODE_EOFBLOCKS    = 22,   /* Blocks allocated beyond EOF */
-       EXT4_INODE_RESERVED     = 31,   /* reserved for ext4 lib */
-};
-
-#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
-#define CHECK_FLAG_VALUE(FLAG) if (!TEST_FLAG_VALUE(FLAG)) { \
-       printk(KERN_EMERG "EXT4 flag fail: " #FLAG ": %d %d\n", \
-               EXT4_##FLAG##_FL, EXT4_INODE_##FLAG); BUG_ON(1); }
-
-/*
- * Since it's pretty easy to mix up bit numbers and hex values, and we
- * can't do a compile-time test for ENUM values, we use a run-time
- * test to make sure that EXT4_XXX_FL is consistent with respect to
- * EXT4_INODE_XXX.  If all is well the printk and BUG_ON will all drop
- * out so it won't cost any extra space in the compiled kernel image.
- * But it's important that these values are the same, since we are
- * using EXT4_INODE_XXX to test for the flag values, but EXT4_XX_FL
- * must be consistent with the values of FS_XXX_FL defined in
- * include/linux/fs.h and the on-disk values found in ext2, ext3, and
- * ext4 filesystems, and of course the values defined in e2fsprogs.
- *
- * It's not paranoia if the Murphy's Law really *is* out to get you.  :-)
+ * Inode dynamic state flags
  */
-static inline void ext4_check_flag_values(void)
-{
-       CHECK_FLAG_VALUE(SECRM);
-       CHECK_FLAG_VALUE(UNRM);
-       CHECK_FLAG_VALUE(COMPR);
-       CHECK_FLAG_VALUE(SYNC);
-       CHECK_FLAG_VALUE(IMMUTABLE);
-       CHECK_FLAG_VALUE(APPEND);
-       CHECK_FLAG_VALUE(NODUMP);
-       CHECK_FLAG_VALUE(NOATIME);
-       CHECK_FLAG_VALUE(DIRTY);
-       CHECK_FLAG_VALUE(COMPRBLK);
-       CHECK_FLAG_VALUE(NOCOMPR);
-       CHECK_FLAG_VALUE(ECOMPR);
-       CHECK_FLAG_VALUE(INDEX);
-       CHECK_FLAG_VALUE(IMAGIC);
-       CHECK_FLAG_VALUE(JOURNAL_DATA);
-       CHECK_FLAG_VALUE(NOTAIL);
-       CHECK_FLAG_VALUE(DIRSYNC);
-       CHECK_FLAG_VALUE(TOPDIR);
-       CHECK_FLAG_VALUE(HUGE_FILE);
-       CHECK_FLAG_VALUE(EXTENTS);
-       CHECK_FLAG_VALUE(EA_INODE);
-       CHECK_FLAG_VALUE(EOFBLOCKS);
-       CHECK_FLAG_VALUE(RESERVED);
-}
+#define EXT4_STATE_JDATA               0x00000001 /* journaled data exists */
+#define EXT4_STATE_NEW                 0x00000002 /* inode is newly created */
+#define EXT4_STATE_XATTR               0x00000004 /* has in-inode xattrs */
+#define EXT4_STATE_NO_EXPAND           0x00000008 /* No space for expansion */
+#define EXT4_STATE_DA_ALLOC_CLOSE      0x00000010 /* Alloc DA blks on close */
+#define EXT4_STATE_EXT_MIGRATE         0x00000020 /* Inode is migrating */
+#define EXT4_STATE_DIO_UNWRITTEN       0x00000040 /* need convert on dio done*/
 
 /* Used to pass group descriptor data when online resize is done */
 struct ext4_new_group_input {
@@ -406,18 +335,6 @@ struct ext4_new_group_input {
        __u16 unused;
 };
 
-#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
-struct compat_ext4_new_group_input {
-       u32 group;
-       compat_u64 block_bitmap;
-       compat_u64 inode_bitmap;
-       compat_u64 inode_table;
-       u32 blocks_count;
-       u16 reserved_blocks;
-       u16 unused;
-};
-#endif
-
 /* The struct ext4_new_group_input in kernel space, with free_blocks_count */
 struct ext4_new_group_data {
        __u32 group;
@@ -444,11 +361,14 @@ struct ext4_new_group_data {
           so set the magic i_delalloc_reserve_flag after taking the 
           inode allocation semaphore for */
 #define EXT4_GET_BLOCKS_DELALLOC_RESERVE       0x0004
+       /* Call ext4_da_update_reserve_space() after successfully 
+          allocating the blocks */
+#define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE   0x0008
        /* caller is from the direct IO path, request to creation of an
        unitialized extents if not allocated, split the uninitialized
        extent if blocks has been preallocated already*/
-#define EXT4_GET_BLOCKS_DIO                    0x0008
-#define EXT4_GET_BLOCKS_CONVERT                        0x0010
+#define EXT4_GET_BLOCKS_DIO                    0x0010
+#define EXT4_GET_BLOCKS_CONVERT                        0x0020
 #define EXT4_GET_BLOCKS_DIO_CREATE_EXT         (EXT4_GET_BLOCKS_DIO|\
                                         EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
        /* Convert extent to initialized after direct IO complete */
@@ -477,7 +397,6 @@ struct ext4_new_group_data {
 #define EXT4_IOC_ALLOC_DA_BLKS         _IO('f', 12)
 #define EXT4_IOC_MOVE_EXT              _IOWR('f', 15, struct move_extent)
 
-#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /*
  * ioctl commands in 32 bit emulation
  */
@@ -488,13 +407,11 @@ struct ext4_new_group_data {
 #define EXT4_IOC32_GETRSVSZ            _IOR('f', 5, int)
 #define EXT4_IOC32_SETRSVSZ            _IOW('f', 6, int)
 #define EXT4_IOC32_GROUP_EXTEND                _IOW('f', 7, unsigned int)
-#define EXT4_IOC32_GROUP_ADD           _IOW('f', 8, struct compat_ext4_new_group_input)
 #ifdef CONFIG_JBD2_DEBUG
 #define EXT4_IOC32_WAIT_FOR_READONLY   _IOR('f', 99, int)
 #endif
 #define EXT4_IOC32_GETVERSION_OLD      FS_IOC32_GETVERSION
 #define EXT4_IOC32_SETVERSION_OLD      FS_IOC32_SETVERSION
-#endif
 
 
 /*
@@ -698,8 +615,9 @@ struct ext4_ext_cache {
  */
 struct ext4_inode_info {
        __le32  i_data[15];     /* unconverted */
-       __u32   i_dtime;
+       __u32   i_flags;
        ext4_fsblk_t    i_file_acl;
+       __u32   i_dtime;
 
        /*
         * i_block_group is the number of the block group which contains
@@ -709,8 +627,7 @@ struct ext4_inode_info {
         * near to their parent directory's inode.
         */
        ext4_group_t    i_block_group;
-       unsigned long   i_state_flags;          /* Dynamic state flags */
-       unsigned long   i_flags;
+       __u32   i_state;                /* Dynamic state flags for ext4 */
 
        ext4_lblk_t             i_dir_start_lookup;
 #ifdef CONFIG_EXT4_FS_XATTR
@@ -776,8 +693,6 @@ struct ext4_inode_info {
        unsigned int i_reserved_meta_blocks;
        unsigned int i_allocated_meta_blocks;
        unsigned short i_delalloc_reserved_flag;
-       sector_t i_da_metadata_calc_last_lblock;
-       int i_da_metadata_calc_len;
 
        /* on-disk additional length */
        __u16 i_extra_isize;
@@ -1130,37 +1045,6 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
                (ino >= EXT4_FIRST_INO(sb) &&
                 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
-
-/*
- * Inode dynamic state flags
- */
-enum {
-       EXT4_STATE_JDATA,               /* journaled data exists */
-       EXT4_STATE_NEW,                 /* inode is newly created */
-       EXT4_STATE_XATTR,               /* has in-inode xattrs */
-       EXT4_STATE_NO_EXPAND,           /* No space for expansion */
-       EXT4_STATE_DA_ALLOC_CLOSE,      /* Alloc DA blks on close */
-       EXT4_STATE_EXT_MIGRATE,         /* Inode is migrating */
-       EXT4_STATE_DIO_UNWRITTEN,       /* need convert on dio done*/
-       EXT4_STATE_NEWENTRY,            /* File just added to dir */
-};
-
-#define EXT4_INODE_BIT_FNS(name, field)                                        \
-static inline int ext4_test_inode_##name(struct inode *inode, int bit) \
-{                                                                      \
-       return test_bit(bit, &EXT4_I(inode)->i_##field);                \
-}                                                                      \
-static inline void ext4_set_inode_##name(struct inode *inode, int bit) \
-{                                                                      \
-       set_bit(bit, &EXT4_I(inode)->i_##field);                        \
-}                                                                      \
-static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
-{                                                                      \
-       clear_bit(bit, &EXT4_I(inode)->i_##field);                      \
-}
-
-EXT4_INODE_BIT_FNS(flag, flags)
-EXT4_INODE_BIT_FNS(state, state_flags)
 #else
 /* Assume that user mode programs are passing in an ext4fs superblock, not
  * a kernel struct super_block.  This will allow us to call the feature-test
@@ -1345,7 +1229,7 @@ struct ext4_dir_entry_2 {
 
 #define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
                                      EXT4_FEATURE_COMPAT_DIR_INDEX) && \
-                   ext4_test_inode_flag((dir), EXT4_INODE_INDEX))
+                     (EXT4_I(dir)->i_flags & EXT4_INDEX_FL))
 #define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX)
 #define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
 
@@ -1554,8 +1438,6 @@ extern int ext4_block_truncate_page(handle_t *handle,
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern int flush_aio_dio_completed_IO(struct inode *inode);
-extern void ext4_da_update_reserve_space(struct inode *inode,
-                                       int used, int quota_claim);
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
 extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
@@ -1755,7 +1637,6 @@ struct ext4_group_info {
        ext4_grpblk_t   bb_first_free;  /* first free block */
        ext4_grpblk_t   bb_free;        /* total free blocks */
        ext4_grpblk_t   bb_fragments;   /* nr of freespace fragments */
-       ext4_grpblk_t   bb_largest_free_order;/* order of largest frag in BG */
        struct          list_head bb_prealloc_list;
 #ifdef DOUBLE_CHECK
        void            *bb_bitmap;
@@ -1859,7 +1740,7 @@ extern void ext4_ext_release(struct super_block *);
 extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
                          loff_t len);
 extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
-                         ssize_t len);
+                         loff_t len);
 extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
                           sector_t block, unsigned int max_blocks,
                           struct buffer_head *bh, int flags);
@@ -1888,8 +1769,6 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
        set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
 }
 
-#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
-
 #endif /* __KERNEL__ */
 
 #endif /* _EXT4_H */
index bdb6ce7e2eb48d08c1bf006e28fdc7b27a8651dd..2ca686454e875c976ff969d5ecf7e437d0dadd40 100644 (file)
@@ -225,8 +225,7 @@ static inline void ext4_ext_mark_initialized(struct ext4_extent *ext)
        ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext));
 }
 
-extern int ext4_ext_calc_metadata_amount(struct inode *inode,
-                                        sector_t lblocks);
+extern int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks);
 extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex);
 extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
 extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t);
index 496249aeec965805afcd01082f821a13e181f17f..6a9409920deef2f6040b8d7279114cfc0612221d 100644 (file)
@@ -89,7 +89,7 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
                        ext4_journal_abort_handle(where, __func__, bh,
                                                  handle, err);
        } else {
-               if (inode)
+               if (inode && bh)
                        mark_buffer_dirty_inode(bh, inode);
                else
                        mark_buffer_dirty(bh);
index 386095d5b7606394a89df86bd47a6cdd65ac95ae..1892a7763426e8f6021990256cf3056eb9a86e16 100644 (file)
@@ -282,7 +282,7 @@ static inline int ext4_should_journal_data(struct inode *inode)
                return 1;
        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
                return 1;
-       if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
+       if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
                return 1;
        return 0;
 }
@@ -293,7 +293,7 @@ static inline int ext4_should_order_data(struct inode *inode)
                return 0;
        if (!S_ISREG(inode->i_mode))
                return 0;
-       if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
+       if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
                return 0;
        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
                return 1;
@@ -306,7 +306,7 @@ static inline int ext4_should_writeback_data(struct inode *inode)
                return 0;
        if (EXT4_JOURNAL(inode) == NULL)
                return 1;
-       if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
+       if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
                return 0;
        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
                return 1;
index f37555909872f5b102f1fef31ed51d08923d7790..8b8bae4c0cf4a0c8c2d043569858106708902b26 100644 (file)
@@ -107,8 +107,11 @@ static int ext4_ext_truncate_extend_restart(handle_t *handle,
        if (err <= 0)
                return err;
        err = ext4_truncate_restart_trans(handle, inode, needed);
-       if (err == 0)
-               err = -EAGAIN;
+       /*
+        * We have dropped i_data_sem so someone might have cached again
+        * an extent we are going to truncate.
+        */
+       ext4_ext_invalidate_cache(inode);
 
        return err;
 }
@@ -293,44 +296,29 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
  * to allocate @blocks
  * Worse case is one block per extent
  */
-int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
+int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
 {
-       struct ext4_inode_info *ei = EXT4_I(inode);
-       int idxs, num = 0;
+       int lcap, icap, rcap, leafs, idxs, num;
+       int newextents = blocks;
 
-       idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
-               / sizeof(struct ext4_extent_idx));
+       rcap = ext4_ext_space_root_idx(inode, 0);
+       lcap = ext4_ext_space_block(inode, 0);
+       icap = ext4_ext_space_block_idx(inode, 0);
 
-       /*
-        * If the new delayed allocation block is contiguous with the
-        * previous da block, it can share index blocks with the
-        * previous block, so we only need to allocate a new index
-        * block every idxs leaf blocks.  At ldxs**2 blocks, we need
-        * an additional index block, and at ldxs**3 blocks, yet
-        * another index blocks.
-        */
-       if (ei->i_da_metadata_calc_len &&
-           ei->i_da_metadata_calc_last_lblock+1 == lblock) {
-               if ((ei->i_da_metadata_calc_len % idxs) == 0)
-                       num++;
-               if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
-                       num++;
-               if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
-                       num++;
-                       ei->i_da_metadata_calc_len = 0;
-               } else
-                       ei->i_da_metadata_calc_len++;
-               ei->i_da_metadata_calc_last_lblock++;
-               return num;
-       }
+       /* number of new leaf blocks needed */
+       num = leafs = (newextents + lcap - 1) / lcap;
 
        /*
-        * In the worst case we need a new set of index blocks at
-        * every level of the inode's extent tree.
+        * Worse case, we need separate index block(s)
+        * to link all new leaf blocks
         */
-       ei->i_da_metadata_calc_len = 1;
-       ei->i_da_metadata_calc_last_lblock = lblock;
-       return ext_depth(inode) + 1;
+       idxs = (leafs + icap - 1) / icap;
+       do {
+               num += idxs;
+               idxs = (idxs + icap - 1) / icap;
+       } while (idxs > rcap);
+
+       return num;
 }
 
 static int
@@ -1948,7 +1936,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 
        BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
                        cex->ec_type != EXT4_EXT_CACHE_EXTENT);
-       if (in_range(block, cex->ec_block, cex->ec_len)) {
+       if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
                ex->ee_block = cpu_to_le32(cex->ec_block);
                ext4_ext_store_pblock(ex, cex->ec_start);
                ex->ee_len = cpu_to_le16(cex->ec_len);
@@ -2260,7 +2248,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
        int depth = ext_depth(inode);
        struct ext4_ext_path *path;
        handle_t *handle;
-       int i, err;
+       int i = 0, err = 0;
 
        ext_debug("truncate since %u\n", start);
 
@@ -2269,26 +2257,23 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
-again:
        ext4_ext_invalidate_cache(inode);
 
        /*
         * We start scanning from right side, freeing all the blocks
         * after i_size and walking into the tree depth-wise.
         */
-       depth = ext_depth(inode);
        path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
        if (path == NULL) {
                ext4_journal_stop(handle);
                return -ENOMEM;
        }
-       path[0].p_depth = depth;
        path[0].p_hdr = ext_inode_hdr(inode);
        if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
                err = -EIO;
                goto out;
        }
-       i = err = 0;
+       path[0].p_depth = depth;
 
        while (i >= 0 && err == 0) {
                if (i == depth) {
@@ -2382,8 +2367,6 @@ again:
 out:
        ext4_ext_drop_refs(path);
        kfree(path);
-       if (err == -EAGAIN)
-               goto again;
        ext4_journal_stop(handle);
 
        return err;
@@ -2448,7 +2431,7 @@ static void bi_complete(struct bio *bio, int error)
 /* FIXME!! we need to try to merge to left or right after zero-out  */
 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
 {
-       int ret;
+       int ret = -EIO;
        struct bio *bio;
        int blkbits, blocksize;
        sector_t ee_pblock;
@@ -2472,9 +2455,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
                        len = ee_len;
 
                bio = bio_alloc(GFP_NOIO, len);
-               if (!bio)
-                       return -ENOMEM;
-
                bio->bi_sector = ee_pblock;
                bio->bi_bdev   = inode->i_sb->s_bdev;
 
@@ -2502,15 +2482,17 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
                submit_bio(WRITE, bio);
                wait_for_completion(&event);
 
-               if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
-                       bio_put(bio);
-                       return -EIO;
+               if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+                       ret = 0;
+               else {
+                       ret = -EIO;
+                       break;
                }
                bio_put(bio);
                ee_len    -= done;
                ee_pblock += done  << (blkbits - 9);
        }
-       return 0;
+       return ret;
 }
 
 #define EXT4_EXT_ZERO_LEN 7
@@ -2535,21 +2517,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        struct ext4_extent *ex2 = NULL;
        struct ext4_extent *ex3 = NULL;
        struct ext4_extent_header *eh;
-       ext4_lblk_t ee_block, eof_block;
+       ext4_lblk_t ee_block;
        unsigned int allocated, ee_len, depth;
        ext4_fsblk_t newblock;
        int err = 0;
        int ret = 0;
-       int may_zeroout;
-
-       ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
-               "block %llu, max_blocks %u\n", inode->i_ino,
-               (unsigned long long)iblock, max_blocks);
-
-       eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
-               inode->i_sb->s_blocksize_bits;
-       if (eof_block < iblock + max_blocks)
-               eof_block = iblock + max_blocks;
 
        depth = ext_depth(inode);
        eh = path[depth].p_hdr;
@@ -2558,23 +2530,16 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        ee_len = ext4_ext_get_actual_len(ex);
        allocated = ee_len - (iblock - ee_block);
        newblock = iblock - ee_block + ext_pblock(ex);
-
        ex2 = ex;
        orig_ex.ee_block = ex->ee_block;
        orig_ex.ee_len   = cpu_to_le16(ee_len);
        ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
 
-       /*
-        * It is safe to convert extent to initialized via explicit
-        * zeroout only if extent is fully insde i_size or new_size.
-        */
-       may_zeroout = ee_block + ee_len <= eof_block;
-
        err = ext4_ext_get_access(handle, inode, path + depth);
        if (err)
                goto out;
        /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */
-       if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) {
+       if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
                err =  ext4_ext_zeroout(inode, &orig_ex);
                if (err)
                        goto fix_extent_len;
@@ -2605,7 +2570,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        if (allocated > max_blocks) {
                unsigned int newdepth;
                /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
-               if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
+               if (allocated <= EXT4_EXT_ZERO_LEN) {
                        /*
                         * iblock == ee_block is handled by the zerouout
                         * at the beginning.
@@ -2681,7 +2646,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                ex3->ee_len = cpu_to_le16(allocated - max_blocks);
                ext4_ext_mark_uninitialized(ex3);
                err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
-               if (err == -ENOSPC && may_zeroout) {
+               if (err == -ENOSPC) {
                        err =  ext4_ext_zeroout(inode, &orig_ex);
                        if (err)
                                goto fix_extent_len;
@@ -2705,10 +2670,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                 * update the extent length after successful insert of the
                 * split extent
                 */
-               ee_len -= ext4_ext_get_actual_len(ex3);
-               orig_ex.ee_len = cpu_to_le16(ee_len);
-               may_zeroout = ee_block + ee_len <= eof_block;
-
+               orig_ex.ee_len = cpu_to_le16(ee_len -
+                                               ext4_ext_get_actual_len(ex3));
                depth = newdepth;
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode, iblock, path);
@@ -2732,7 +2695,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                 * otherwise give the extent a chance to merge to left
                 */
                if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
-                       iblock != ee_block && may_zeroout) {
+                                                       iblock != ee_block) {
                        err =  ext4_ext_zeroout(inode, &orig_ex);
                        if (err)
                                goto fix_extent_len;
@@ -2801,7 +2764,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        goto out;
 insert:
        err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
-       if (err == -ENOSPC && may_zeroout) {
+       if (err == -ENOSPC) {
                err =  ext4_ext_zeroout(inode, &orig_ex);
                if (err)
                        goto fix_extent_len;
@@ -2861,21 +2824,14 @@ static int ext4_split_unwritten_extents(handle_t *handle,
        struct ext4_extent *ex2 = NULL;
        struct ext4_extent *ex3 = NULL;
        struct ext4_extent_header *eh;
-       ext4_lblk_t ee_block, eof_block;
+       ext4_lblk_t ee_block;
        unsigned int allocated, ee_len, depth;
        ext4_fsblk_t newblock;
        int err = 0;
-       int may_zeroout;
-
-       ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
-               "block %llu, max_blocks %u\n", inode->i_ino,
-               (unsigned long long)iblock, max_blocks);
-
-       eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
-               inode->i_sb->s_blocksize_bits;
-       if (eof_block < iblock + max_blocks)
-               eof_block = iblock + max_blocks;
 
+       ext_debug("ext4_split_unwritten_extents: inode %lu,"
+                 "iblock %llu, max_blocks %u\n", inode->i_ino,
+                 (unsigned long long)iblock, max_blocks);
        depth = ext_depth(inode);
        eh = path[depth].p_hdr;
        ex = path[depth].p_ext;
@@ -2883,18 +2839,11 @@ static int ext4_split_unwritten_extents(handle_t *handle,
        ee_len = ext4_ext_get_actual_len(ex);
        allocated = ee_len - (iblock - ee_block);
        newblock = iblock - ee_block + ext_pblock(ex);
-
        ex2 = ex;
        orig_ex.ee_block = ex->ee_block;
        orig_ex.ee_len   = cpu_to_le16(ee_len);
        ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
 
-       /*
-        * It is safe to convert extent to initialized via explicit
-        * zeroout only if extent is fully insde i_size or new_size.
-        */
-       may_zeroout = ee_block + ee_len <= eof_block;
-
        /*
         * If the uninitialized extent begins at the same logical
         * block where the write begins, and the write completely
@@ -2929,7 +2878,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
                ex3->ee_len = cpu_to_le16(allocated - max_blocks);
                ext4_ext_mark_uninitialized(ex3);
                err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
-               if (err == -ENOSPC && may_zeroout) {
+               if (err == -ENOSPC) {
                        err =  ext4_ext_zeroout(inode, &orig_ex);
                        if (err)
                                goto fix_extent_len;
@@ -2953,10 +2902,8 @@ static int ext4_split_unwritten_extents(handle_t *handle,
                 * update the extent length after successful insert of the
                 * split extent
                 */
-               ee_len -= ext4_ext_get_actual_len(ex3);
-               orig_ex.ee_len = cpu_to_le16(ee_len);
-               may_zeroout = ee_block + ee_len <= eof_block;
-
+               orig_ex.ee_len = cpu_to_le16(ee_len -
+                                               ext4_ext_get_actual_len(ex3));
                depth = newdepth;
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode, iblock, path);
@@ -3002,7 +2949,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
        goto out;
 insert:
        err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
-       if (err == -ENOSPC && may_zeroout) {
+       if (err == -ENOSPC) {
                err =  ext4_ext_zeroout(inode, &orig_ex);
                if (err)
                        goto fix_extent_len;
@@ -3082,14 +3029,6 @@ out:
        return err;
 }
 
-static void unmap_underlying_metadata_blocks(struct block_device *bdev,
-                       sector_t block, int count)
-{
-       int i;
-       for (i = 0; i < count; i++)
-                unmap_underlying_metadata(bdev, block + i);
-}
-
 static int
 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                        ext4_lblk_t iblock, unsigned int max_blocks,
@@ -3120,7 +3059,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                if (io)
                        io->flag = DIO_AIO_UNWRITTEN;
                else
-                       ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
+                       EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN;
                goto out;
        }
        /* async DIO end_io complete, convert the filled extent to written */
@@ -3165,30 +3104,6 @@ out:
        } else
                allocated = ret;
        set_buffer_new(bh_result);
-       /*
-        * if we allocated more blocks than requested
-        * we need to make sure we unmap the extra block
-        * allocated. The actual needed block will get
-        * unmapped later when we find the buffer_head marked
-        * new.
-        */
-       if (allocated > max_blocks) {
-               unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
-                                       newblock + max_blocks,
-                                       allocated - max_blocks);
-               allocated = max_blocks;
-       }
-
-       /*
-        * If we have done fallocate with the offset that is already
-        * delayed allocated, we would have block reservation
-        * and quota reservation done in the delayed write path.
-        * But fallocate would have already updated quota and block
-        * count for this offset. So cancel these reservation
-        */
-       if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-               ext4_da_update_reserve_space(inode, allocated, 0);
-
 map_out:
        set_buffer_mapped(bh_result);
 out1:
@@ -3229,9 +3144,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 {
        struct ext4_ext_path *path = NULL;
        struct ext4_extent_header *eh;
-       struct ext4_extent newex, *ex, *last_ex;
+       struct ext4_extent newex, *ex;
        ext4_fsblk_t newblock;
-       int i, err = 0, depth, ret, cache_type;
+       int err = 0, depth, ret, cache_type;
        unsigned int allocated = 0;
        struct ext4_allocation_request ar;
        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3281,13 +3196,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
         * this situation is possible, though, _during_ tree modification;
         * this is why assert can't be put in ext4_ext_find_extent()
         */
-       if (path[depth].p_ext == NULL && depth != 0) {
-               ext4_error(inode->i_sb, __func__, "bad extent address "
-                          "inode: %lu, iblock: %lu, depth: %d",
-                          inode->i_ino, (unsigned long) iblock, depth);
-               err = -EIO;
-               goto out2;
-       }
+       BUG_ON(path[depth].p_ext == NULL && depth != 0);
        eh = path[depth].p_hdr;
 
        ex = path[depth].p_ext;
@@ -3302,7 +3211,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                 */
                ee_len = ext4_ext_get_actual_len(ex);
                /* if found extent covers block, simply return it */
-               if (in_range(iblock, ee_block, ee_len)) {
+               if (iblock >= ee_block && iblock < ee_block + ee_len) {
                        newblock = iblock - ee_block + ee_start;
                        /* number of remaining blocks in the extent */
                        allocated = ee_len - (iblock - ee_block);
@@ -3406,36 +3315,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                        if (io)
                                io->flag = DIO_AIO_UNWRITTEN;
                        else
-                               ext4_set_inode_state(inode,
-                                                    EXT4_STATE_DIO_UNWRITTEN);
+                               EXT4_I(inode)->i_state |=
+                                       EXT4_STATE_DIO_UNWRITTEN;;
                }
        }
-
-       if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
-               if (unlikely(!eh->eh_entries)) {
-                       ext4_error(inode->i_sb, __func__,
-                                  "inode#%lu, eh->eh_entries = 0 and "
-                                  "EOFBLOCKS_FL set", inode->i_ino);
-                       err = -EIO;
-                       goto out2;
-               }
-               last_ex = EXT_LAST_EXTENT(eh);
-               /*
-                * If the current leaf block was reached by looking at
-                * the last index block all the way down the tree, and
-                * we are extending the inode beyond the last extent
-                * in the current leaf block, then clear the
-                * EOFBLOCKS_FL flag.
-                */
-               for (i = depth-1; i >= 0; i--) {
-                       if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
-                               break;
-               }
-               if ((i < 0) &&
-                   (iblock + ar.len > le32_to_cpu(last_ex->ee_block) +
-                    ext4_ext_get_actual_len(last_ex)))
-                       ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
-       }
        err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
        if (err) {
                /* free data blocks we just allocated */
@@ -3450,17 +3333,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
        /* previous routine could use block we allocated */
        newblock = ext_pblock(&newex);
        allocated = ext4_ext_get_actual_len(&newex);
-       if (allocated > max_blocks)
-               allocated = max_blocks;
        set_buffer_new(bh_result);
 
-       /*
-        * Update reserved blocks/metadata blocks after successful
-        * block allocation which had been deferred till now.
-        */
-       if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-               ext4_da_update_reserve_space(inode, allocated, 1);
-
        /*
         * Cache the extent and update transaction to commit on fdatasync only
         * when it is _not_ an uninitialized extent.
@@ -3569,13 +3443,6 @@ static void ext4_falloc_update_inode(struct inode *inode,
                        i_size_write(inode, new_size);
                if (new_size > EXT4_I(inode)->i_disksize)
                        ext4_update_i_disksize(inode, new_size);
-       } else {
-               /*
-                * Mark that we allocate beyond EOF so the subsequent truncate
-                * can proceed even if the new size is the same as i_size.
-                */
-               if (new_size > i_size_read(inode))
-                       ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
        }
 
 }
@@ -3603,7 +3470,7 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
         * currently supporting (pre)allocate mode for extent-based
         * files _only_
         */
-       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+       if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
                return -EOPNOTSUPP;
 
        /* preallocation to directories is currently not supported */
@@ -3622,11 +3489,6 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
         */
        credits = ext4_chunk_trans_blocks(inode, max_blocks);
        mutex_lock(&inode->i_mutex);
-       ret = inode_newsize_ok(inode, (len + offset));
-       if (ret) {
-               mutex_unlock(&inode->i_mutex);
-               return ret;
-       }
 retry:
        while (ret >= 0 && ret < max_blocks) {
                block = block + ret;
@@ -3685,7 +3547,7 @@ retry:
  * Returns 0 on success.
  */
 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
-                                   ssize_t len)
+                                   loff_t len)
 {
        handle_t *handle;
        ext4_lblk_t block;
@@ -3821,7 +3683,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
        int error = 0;
 
        /* in-inode? */
-       if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
+       if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
                struct ext4_iloc iloc;
                int offset;     /* offset of xattr in inode */
 
@@ -3834,7 +3696,6 @@ static int ext4_xattr_fiemap(struct inode *inode,
                physical += offset;
                length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
                flags |= FIEMAP_EXTENT_DATA_INLINE;
-               brelse(iloc.bh);
        } else { /* external block */
                physical = EXT4_I(inode)->i_file_acl << blockbits;
                length = inode->i_sb->s_blocksize;
@@ -3850,10 +3711,11 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len)
 {
        ext4_lblk_t start_blk;
+       ext4_lblk_t len_blks;
        int error = 0;
 
        /* fallback to generic here if not in extents fmt */
-       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+       if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
                return generic_block_fiemap(inode, fieinfo, start, len,
                        ext4_get_block);
 
@@ -3863,14 +3725,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
                error = ext4_xattr_fiemap(inode, fieinfo);
        } else {
-               ext4_lblk_t len_blks;
-               __u64 last_blk;
-
                start_blk = start >> inode->i_sb->s_blocksize_bits;
-               last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
-               if (last_blk >= EXT_MAX_BLOCK)
-                       last_blk = EXT_MAX_BLOCK-1;
-               len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
+               len_blks = len >> inode->i_sb->s_blocksize_bits;
 
                /*
                 * Walk the extent tree gathering extent information.
index 2a6054129bbc7acb401064b541d4cc0f11f8cea3..9630583cef280d541e87505eba2cfa37589266e3 100644 (file)
@@ -35,9 +35,9 @@
  */
 static int ext4_release_file(struct inode *inode, struct file *filp)
 {
-       if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
+       if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
                ext4_alloc_da_blocks(inode);
-               ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
@@ -65,7 +65,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
 
-       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+       if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
                size_t length = iov_length(iov, nr_segs);
 
index c3660a60c30051534d0ed891ee07eb79574ccd31..d6049e40b161722a26c64b0027bccd8c20c12bf5 100644 (file)
 
 #include <trace/events/ext4.h>
 
-/*
- * If we're not journaling and this is a just-created file, we have to
- * sync our parent directory (if it was freshly created) since
- * otherwise it will only be written by writeback, leaving a huge
- * window during which a crash may lose the file.  This may apply for
- * the parent directory's parent as well, and so on recursively, if
- * they are also freshly created.
- */
-static void ext4_sync_parent(struct inode *inode)
-{
-       struct dentry *dentry = NULL;
-
-       while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
-               ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
-               dentry = list_entry(inode->i_dentry.next,
-                                   struct dentry, d_alias);
-               if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
-                       break;
-               inode = dentry->d_parent->d_inode;
-               sync_mapping_buffers(inode->i_mapping);
-       }
-}
-
 /*
  * akpm: A new design for ext4_sync_file().
  *
@@ -90,12 +67,8 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
        if (ret < 0)
                return ret;
 
-       if (!journal) {
-               ret = simple_fsync(file, dentry, datasync);
-               if (!ret && !list_empty(&inode->i_dentry))
-                       ext4_sync_parent(inode);
-               return ret;
-       }
+       if (!journal)
+               return simple_fsync(file, dentry, datasync);
 
        /*
         * data=writeback,ordered:
@@ -115,21 +88,9 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
                return ext4_force_commit(inode->i_sb);
 
        commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
-       if (jbd2_log_start_commit(journal, commit_tid)) {
-               /*
-                * When the journal is on a different device than the
-                * fs data disk, we need to issue the barrier in
-                * writeback mode.  (In ordered mode, the jbd2 layer
-                * will take care of issuing the barrier.  In
-                * data=journal, all of the data blocks are written to
-                * the journal device.)
-                */
-               if (ext4_should_writeback_data(inode) &&
-                   (journal->j_fs_dev != journal->j_dev) &&
-                   (journal->j_flags & JBD2_BARRIER))
-                       blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
-               ret = jbd2_log_wait_commit(journal, commit_tid);
-       } else if (journal->j_flags & JBD2_BARRIER)
+       if (jbd2_log_start_commit(journal, commit_tid))
+               jbd2_log_wait_commit(journal, commit_tid);
+       else if (journal->j_flags & JBD2_BARRIER)
                blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
        return ret;
 }
index 55a93f5bb0031a9c3836601fbe51374033310bd5..f3624ead4f6c5136189e759d0bba8d0c9c2ca356 100644 (file)
@@ -244,50 +244,57 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
        if (fatal)
                goto error_return;
 
-       fatal = -ESRCH;
-       gdp = ext4_get_group_desc(sb, block_group, &bh2);
-       if (gdp) {
+       /* Ok, now we can actually update the inode bitmaps.. */
+       cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
+                                       bit, bitmap_bh->b_data);
+       if (!cleared)
+               ext4_error(sb, "ext4_free_inode",
+                          "bit already cleared for inode %lu", ino);
+       else {
+               gdp = ext4_get_group_desc(sb, block_group, &bh2);
+
                BUFFER_TRACE(bh2, "get_write_access");
                fatal = ext4_journal_get_write_access(handle, bh2);
-       }
-       ext4_lock_group(sb, block_group);
-       cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
-       if (fatal || !cleared) {
-               ext4_unlock_group(sb, block_group);
-               goto out;
-       }
-
-       count = ext4_free_inodes_count(sb, gdp) + 1;
-       ext4_free_inodes_set(sb, gdp, count);
-       if (is_directory) {
-               count = ext4_used_dirs_count(sb, gdp) - 1;
-               ext4_used_dirs_set(sb, gdp, count);
-               percpu_counter_dec(&sbi->s_dirs_counter);
-       }
-       gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
-       ext4_unlock_group(sb, block_group);
-
-       percpu_counter_inc(&sbi->s_freeinodes_counter);
-       if (sbi->s_log_groups_per_flex) {
-               ext4_group_t f = ext4_flex_group(sbi, block_group);
+               if (fatal) goto error_return;
+
+               if (gdp) {
+                       ext4_lock_group(sb, block_group);
+                       count = ext4_free_inodes_count(sb, gdp) + 1;
+                       ext4_free_inodes_set(sb, gdp, count);
+                       if (is_directory) {
+                               count = ext4_used_dirs_count(sb, gdp) - 1;
+                               ext4_used_dirs_set(sb, gdp, count);
+                               if (sbi->s_log_groups_per_flex) {
+                                       ext4_group_t f;
+
+                                       f = ext4_flex_group(sbi, block_group);
+                                       atomic_dec(&sbi->s_flex_groups[f].free_inodes);
+                               }
 
-               atomic_inc(&sbi->s_flex_groups[f].free_inodes);
-               if (is_directory)
-                       atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+                       }
+                       gdp->bg_checksum = ext4_group_desc_csum(sbi,
+                                                       block_group, gdp);
+                       ext4_unlock_group(sb, block_group);
+                       percpu_counter_inc(&sbi->s_freeinodes_counter);
+                       if (is_directory)
+                               percpu_counter_dec(&sbi->s_dirs_counter);
+
+                       if (sbi->s_log_groups_per_flex) {
+                               ext4_group_t f;
+
+                               f = ext4_flex_group(sbi, block_group);
+                               atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+                       }
+               }
+               BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+               err = ext4_handle_dirty_metadata(handle, NULL, bh2);
+               if (!fatal) fatal = err;
        }
-       BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
-       fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
-out:
-       if (cleared) {
-               BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
-               err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
-               if (!fatal)
-                       fatal = err;
-               sb->s_dirt = 1;
-       } else
-               ext4_error(sb, "ext4_free_inode",
-                          "bit already cleared for inode %lu", ino);
-
+       BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
+       err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+       if (!fatal)
+               fatal = err;
+       sb->s_dirt = 1;
 error_return:
        brelse(bitmap_bh);
        ext4_std_error(sb, fatal);
@@ -497,7 +504,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
 
        if (S_ISDIR(mode) &&
            ((parent == sb->s_root->d_inode) ||
-            (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
+            (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) {
                int best_ndir = inodes_per_group;
                int ret = -1;
 
@@ -772,7 +779,7 @@ static int ext4_claim_inode(struct super_block *sb,
                if (sbi->s_log_groups_per_flex) {
                        ext4_group_t f = ext4_flex_group(sbi, group);
 
-                       atomic_inc(&sbi->s_flex_groups[f].used_dirs);
+                       atomic_inc(&sbi->s_flex_groups[f].free_inodes);
                }
        }
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
@@ -897,7 +904,7 @@ repeat_in_this_group:
                                BUFFER_TRACE(inode_bitmap_bh,
                                        "call ext4_handle_dirty_metadata");
                                err = ext4_handle_dirty_metadata(handle,
-                                                                NULL,
+                                                                inode,
                                                        inode_bitmap_bh);
                                if (err)
                                        goto fail;
@@ -1022,8 +1029,7 @@ got:
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);
 
-       ei->i_state_flags = 0;
-       ext4_set_inode_state(inode, EXT4_STATE_NEW);
+       ei->i_state = EXT4_STATE_NEW;
 
        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
 
@@ -1044,7 +1050,7 @@ got:
        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
                /* set extent flag only for directory, file and normal symlink*/
                if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
-                       ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
+                       EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
                        ext4_ext_tree_init(handle, inode);
                }
        }
index 1b23f9d2ef5668a5d992b42fd77d38fda3e197bc..e233879ebbcb0595651ced91b3ecafc8fc492685 100644 (file)
@@ -957,7 +957,7 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
        int count = 0;
        ext4_fsblk_t first_block = 0;
 
-       J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
+       J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
        J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
        depth = ext4_block_to_path(inode, iblock, offsets,
                                   &blocks_to_boundary);
@@ -1051,115 +1051,81 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
        return &EXT4_I(inode)->i_reserved_quota;
 }
 #endif
-
 /*
  * Calculate the number of metadata blocks need to reserve
- * to allocate a new block at @lblocks for non extent file based file
+ * to allocate @blocks for non extent file based file
  */
-static int ext4_indirect_calc_metadata_amount(struct inode *inode,
-                                             sector_t lblock)
+static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
 {
-       struct ext4_inode_info *ei = EXT4_I(inode);
-       sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
-       int blk_bits;
+       int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+       int ind_blks, dind_blks, tind_blks;
 
-       if (lblock < EXT4_NDIR_BLOCKS)
-               return 0;
+       /* number of new indirect blocks needed */
+       ind_blks = (blocks + icap - 1) / icap;
 
-       lblock -= EXT4_NDIR_BLOCKS;
+       dind_blks = (ind_blks + icap - 1) / icap;
 
-       if (ei->i_da_metadata_calc_len &&
-           (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
-               ei->i_da_metadata_calc_len++;
-               return 0;
-       }
-       ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
-       ei->i_da_metadata_calc_len = 1;
-       blk_bits = order_base_2(lblock);
-       return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
+       tind_blks = 1;
+
+       return ind_blks + dind_blks + tind_blks;
 }
 
 /*
  * Calculate the number of metadata blocks need to reserve
- * to allocate a block located at @lblock
+ * to allocate given number of blocks
  */
-static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
+static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
 {
-       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-               return ext4_ext_calc_metadata_amount(inode, lblock);
+       if (!blocks)
+               return 0;
 
-       return ext4_indirect_calc_metadata_amount(inode, lblock);
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+               return ext4_ext_calc_metadata_amount(inode, blocks);
+
+       return ext4_indirect_calc_metadata_amount(inode, blocks);
 }
 
-/*
- * Called with i_data_sem down, which is important since we can call
- * ext4_discard_preallocations() from here.
- */
-void ext4_da_update_reserve_space(struct inode *inode,
-                                       int used, int quota_claim)
+static void ext4_da_update_reserve_space(struct inode *inode, int used)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       struct ext4_inode_info *ei = EXT4_I(inode);
-       int mdb_free = 0, allocated_meta_blocks = 0;
-
-       spin_lock(&ei->i_block_reservation_lock);
-       if (unlikely(used > ei->i_reserved_data_blocks)) {
-               ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
-                        "with only %d reserved data blocks\n",
-                        __func__, inode->i_ino, used,
-                        ei->i_reserved_data_blocks);
-               WARN_ON(1);
-               used = ei->i_reserved_data_blocks;
-       }
-
-       /* Update per-inode reservations */
-       ei->i_reserved_data_blocks -= used;
-       used += ei->i_allocated_meta_blocks;
-       ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-       allocated_meta_blocks = ei->i_allocated_meta_blocks;
-       ei->i_allocated_meta_blocks = 0;
-       percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);
-
-       if (ei->i_reserved_data_blocks == 0) {
-               /*
-                * We can release all of the reserved metadata blocks
-                * only when we have written all of the delayed
-                * allocation blocks.
-                */
-               mdb_free = ei->i_reserved_meta_blocks;
-               ei->i_reserved_meta_blocks = 0;
-               ei->i_da_metadata_calc_len = 0;
+       int total, mdb, mdb_free;
+
+       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+       /* recalculate the number of metablocks still need to be reserved */
+       total = EXT4_I(inode)->i_reserved_data_blocks - used;
+       mdb = ext4_calc_metadata_amount(inode, total);
+
+       /* figure out how many metablocks to release */
+       BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
+       mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
+
+       if (mdb_free) {
+               /* Account for allocated meta_blocks */
+               mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
+
+               /* update fs dirty blocks counter */
                percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
+               EXT4_I(inode)->i_allocated_meta_blocks = 0;
+               EXT4_I(inode)->i_reserved_meta_blocks = mdb;
        }
+
+       /* update per-inode reservations */
+       BUG_ON(used  > EXT4_I(inode)->i_reserved_data_blocks);
+       EXT4_I(inode)->i_reserved_data_blocks -= used;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
-       /* Update quota subsystem */
-       if (quota_claim) {
-               vfs_dq_claim_block(inode, used);
-               if (mdb_free)
-                       vfs_dq_release_reservation_block(inode, mdb_free);
-       } else {
-               /*
-                * We did fallocate with an offset that is already delayed
-                * allocated. So on delayed allocated writeback we should
-                * not update the quota for allocated blocks. But then
-                * converting an fallocate region to initialized region would
-                * have caused a metadata allocation. So claim quota for
-                * that
-                */
-               if (allocated_meta_blocks)
-                       vfs_dq_claim_block(inode, allocated_meta_blocks);
-               vfs_dq_release_reservation_block(inode, mdb_free + used -
-                                               allocated_meta_blocks);
-       }
+       /*
+        * free those over-booking quota for metadata blocks
+        */
+       if (mdb_free)
+               vfs_dq_release_reservation_block(inode, mdb_free);
 
        /*
         * If we have done all the pending block allocations and if
         * there aren't any writers on the inode, we can discard the
         * inode's preallocations.
         */
-       if ((ei->i_reserved_data_blocks == 0) &&
-           (atomic_read(&inode->i_writecount) == 0))
+       if (!total && (atomic_read(&inode->i_writecount) == 0))
                ext4_discard_preallocations(inode);
 }
 
@@ -1274,7 +1240,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
         * file system block.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
-       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
                                bh, 0);
        } else {
@@ -1336,7 +1302,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
         * We need to check for EXT4 here because migrate
         * could have changed the inode type in between
         */
-       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
                                              bh, flags);
        } else {
@@ -1349,22 +1315,20 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags
                         */
-                       ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
+                       EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
                }
-
-               /*
-                * Update reserved blocks/metadata blocks after successful
-                * block allocation which had been deferred till now. We don't
-                * support fallocate for non extent files. So we can update
-                * reserve space here.
-                */
-               if ((retval > 0) &&
-                       (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
-                       ext4_da_update_reserve_space(inode, retval, 1);
        }
+
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                EXT4_I(inode)->i_delalloc_reserved_flag = 0;
 
+       /*
+        * Update reserved blocks/metadata blocks after successful
+        * block allocation which had been deferred till now.
+        */
+       if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
+               ext4_da_update_reserve_space(inode, retval);
+
        up_write((&EXT4_I(inode)->i_data_sem));
        if (retval > 0 && buffer_mapped(bh)) {
                int ret = check_block_validity(inode, "file system "
@@ -1836,7 +1800,7 @@ static int ext4_journalled_write_end(struct file *file,
        new_i_size = pos + copied;
        if (new_i_size > inode->i_size)
                i_size_write(inode, pos+copied);
-       ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+       EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
        if (new_i_size > EXT4_I(inode)->i_disksize) {
                ext4_update_i_disksize(inode, new_i_size);
                ret2 = ext4_mark_inode_dirty(handle, inode);
@@ -1870,15 +1834,11 @@ static int ext4_journalled_write_end(struct file *file,
        return ret ? ret : copied;
 }
 
-/*
- * Reserve a single block located at lblock
- */
-static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
+static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
 {
        int retries = 0;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       struct ext4_inode_info *ei = EXT4_I(inode);
-       unsigned long md_needed, md_reserved;
+       unsigned long md_needed, mdblocks, total = 0;
 
        /*
         * recalculate the amount of metadata blocks to reserve
@@ -1886,31 +1846,35 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
         * worse case is one extent per block
         */
 repeat:
-       spin_lock(&ei->i_block_reservation_lock);
-       md_reserved = ei->i_reserved_meta_blocks;
-       md_needed = ext4_calc_metadata_amount(inode, lblock);
-       spin_unlock(&ei->i_block_reservation_lock);
+       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+       total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
+       mdblocks = ext4_calc_metadata_amount(inode, total);
+       BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);
+
+       md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
+       total = md_needed + nrblocks;
+       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
        /*
         * Make quota reservation here to prevent quota overflow
         * later. Real quota accounting is done at pages writeout
         * time.
         */
-       if (vfs_dq_reserve_block(inode, md_needed + 1))
+       if (vfs_dq_reserve_block(inode, total))
                return -EDQUOT;
 
-       if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
-               vfs_dq_release_reservation_block(inode, md_needed + 1);
+       if (ext4_claim_free_blocks(sbi, total)) {
+               vfs_dq_release_reservation_block(inode, total);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
                return -ENOSPC;
        }
-       spin_lock(&ei->i_block_reservation_lock);
-       ei->i_reserved_data_blocks++;
-       ei->i_reserved_meta_blocks += md_needed;
-       spin_unlock(&ei->i_block_reservation_lock);
+       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+       EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
+       EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
+       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
        return 0;       /* success */
 }
@@ -1918,46 +1882,49 @@ repeat:
 static void ext4_da_release_space(struct inode *inode, int to_free)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       struct ext4_inode_info *ei = EXT4_I(inode);
+       int total, mdb, mdb_free, release;
 
        if (!to_free)
                return;         /* Nothing to release, exit */
 
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 
-       if (unlikely(to_free > ei->i_reserved_data_blocks)) {
+       if (!EXT4_I(inode)->i_reserved_data_blocks) {
                /*
-                * if there aren't enough reserved blocks, then the
-                * counter is messed up somewhere.  Since this
-                * function is called from invalidate page, it's
-                * harmless to return without any action.
+                * if there is no reserved blocks, but we try to free some
+                * then the counter is messed up somewhere.
+                * but since this function is called from invalidate
+                * page, it's harmless to return without any action
                 */
-               ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
-                        "ino %lu, to_free %d with only %d reserved "
-                        "data blocks\n", inode->i_ino, to_free,
-                        ei->i_reserved_data_blocks);
-               WARN_ON(1);
-               to_free = ei->i_reserved_data_blocks;
+               printk(KERN_INFO "ext4 delalloc try to release %d reserved "
+                           "blocks for inode %lu, but there is no reserved "
+                           "data blocks\n", to_free, inode->i_ino);
+               spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+               return;
        }
-       ei->i_reserved_data_blocks -= to_free;
 
-       if (ei->i_reserved_data_blocks == 0) {
-               /*
-                * We can release all of the reserved metadata blocks
-                * only when we have written all of the delayed
-                * allocation blocks.
-                */
-               to_free += ei->i_reserved_meta_blocks;
-               ei->i_reserved_meta_blocks = 0;
-               ei->i_da_metadata_calc_len = 0;
-       }
+       /* recalculate the number of metablocks still need to be reserved */
+       total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
+       mdb = ext4_calc_metadata_amount(inode, total);
+
+       /* figure out how many metablocks to release */
+       BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
+       mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
+
+       release = to_free + mdb_free;
 
-       /* update fs dirty blocks counter */
-       percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
+       /* update fs dirty blocks counter for truncate case */
+       percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
 
+       /* update per-inode reservations */
+       BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
+       EXT4_I(inode)->i_reserved_data_blocks -= to_free;
+
+       BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
+       EXT4_I(inode)->i_reserved_meta_blocks = mdb;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
-       vfs_dq_release_reservation_block(inode, to_free);
+       vfs_dq_release_reservation_block(inode, release);
 }
 
 static void ext4_da_page_release_reservation(struct page *page,
@@ -2262,10 +2229,10 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
         * variables are updated after the blocks have been allocated.
         */
        new.b_state = 0;
-       get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
+       get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
+                           EXT4_GET_BLOCKS_DELALLOC_RESERVE);
        if (mpd->b_state & (1 << BH_Delay))
-               get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
-
+               get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
        blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
                               &new, get_blocks_flags);
        if (blks < 0) {
@@ -2294,7 +2261,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                ext4_msg(mpd->inode->i_sb, KERN_CRIT,
                         "delayed block allocation failed for inode %lu at "
                         "logical offset %llu with max blocks %zd with "
-                        "error %d", mpd->inode->i_ino,
+                        "error %d\n", mpd->inode->i_ino,
                         (unsigned long long) next,
                         mpd->b_size >> mpd->inode->i_blkbits, err);
                printk(KERN_CRIT "This should not happen!!  "
@@ -2361,17 +2328,8 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
        sector_t next;
        int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
 
-       /*
-        * XXX Don't go larger than mballoc is willing to allocate
-        * This is a stopgap solution.  We eventually need to fold
-        * mpage_da_submit_io() into this function and then call
-        * ext4_get_blocks() multiple times in a loop
-        */
-       if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
-               goto flush_it;
-
        /* check if thereserved journal credits might overflow */
-       if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
+       if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
                if (nrblocks >= EXT4_MAX_TRANS_DATA) {
                        /*
                         * With non-extent format we are limited by the journal
@@ -2572,7 +2530,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                 * XXX: __block_prepare_write() unmaps passed block,
                 * is it OK?
                 */
-               ret = ext4_da_reserve_space(inode, iblock);
+               ret = ext4_da_reserve_space(inode, 1);
                if (ret)
                        /* not enough space to reserve */
                        return ret;
@@ -2683,7 +2641,7 @@ static int __ext4_journalled_writepage(struct page *page,
                ret = err;
 
        walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
-       ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+       EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
 out:
        return ret;
 }
@@ -2836,7 +2794,7 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
         * number of contiguous block. So we will limit
         * number of contiguous block to a sane value
         */
-       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
+       if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
            (max_blocks > EXT4_MAX_TRANS_DATA))
                max_blocks = EXT4_MAX_TRANS_DATA;
 
@@ -2956,7 +2914,7 @@ retry:
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
-                              "%ld pages, ino %lu; err %d", __func__,
+                              "%ld pages, ino %lu; err %d\n", __func__,
                                wbc->nr_to_write, inode->i_ino, ret);
                        goto out_writepages;
                }
@@ -3031,7 +2989,7 @@ retry:
        if (pages_skipped != wbc->pages_skipped)
                ext4_msg(inode->i_sb, KERN_CRIT,
                         "This should not happen leaving %s "
-                        "with nr_to_write = %ld ret = %d",
+                        "with nr_to_write = %ld ret = %d\n",
                         __func__, wbc->nr_to_write, ret);
 
        /* Update index */
@@ -3047,7 +3005,8 @@ retry:
 out_writepages:
        if (!no_nrwrite_index_update)
                wbc->no_nrwrite_index_update = 0;
-       wbc->nr_to_write -= nr_to_writebump;
+       if (wbc->nr_to_write > nr_to_writebump)
+               wbc->nr_to_write -= nr_to_writebump;
        wbc->range_start = range_start;
        trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
        return ret;
@@ -3072,18 +3031,11 @@ static int ext4_nonda_switch(struct super_block *sb)
        if (2 * free_blocks < 3 * dirty_blocks ||
                free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
                /*
-                * free block count is less than 150% of dirty blocks
-                * or free blocks is less than watermark
+                * free block count is less that 150% of dirty blocks
+                * or free blocks is less that watermark
                 */
                return 1;
        }
-       /*
-        * Even if we don't switch but are nearing capacity,
-        * start pushing delalloc when 1/2 of free blocks are dirty.
-        */
-       if (free_blocks < 2 * dirty_blocks)
-               writeback_inodes_sb_if_idle(sb);
-
        return 0;
 }
 
@@ -3091,7 +3043,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
 {
-       int ret, retries = 0, quota_retries = 0;
+       int ret, retries = 0;
        struct page *page;
        pgoff_t index;
        unsigned from, to;
@@ -3150,22 +3102,6 @@ retry:
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
-
-       if ((ret == -EDQUOT) &&
-           EXT4_I(inode)->i_reserved_meta_blocks &&
-           (quota_retries++ < 3)) {
-               /*
-                * Since we often over-estimate the number of meta
-                * data blocks required, we may sometimes get a
-                * spurios out of quota error even though there would
-                * be enough space once we write the data blocks and
-                * find out how many meta data blocks were _really_
-                * required.  So try forcing the inode write to see if
-                * that helps.
-                */
-               write_inode_now(inode, (quota_retries == 3));
-               goto retry;
-       }
 out:
        return ret;
 }
@@ -3354,8 +3290,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
                filemap_write_and_wait(mapping);
        }
 
-       if (EXT4_JOURNAL(inode) &&
-           ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
+       if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
                /*
                 * This is a REALLY heavyweight approach, but the use of
                 * bmap on dirty files is expected to be extremely rare:
@@ -3374,7 +3309,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
                 * everything they get.
                 */
 
-               ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
                journal = EXT4_JOURNAL(inode);
                jbd2_journal_lock_updates(journal);
                err = jbd2_journal_flush(journal);
@@ -3490,9 +3425,6 @@ retry:
                         * but cannot extend i_size. Bail out and pretend
                         * the write failed... */
                        ret = PTR_ERR(handle);
-                       if (inode->i_nlink)
-                               ext4_orphan_del(NULL, inode);
-
                        goto out;
                }
                if (inode->i_nlink)
@@ -3608,7 +3540,7 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
 {
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
-       ssize_t size = io->size;
+       size_t size = io->size;
        int ret = 0;
 
        ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
@@ -3845,8 +3777,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
                        ext4_free_io_end(iocb->private);
                        iocb->private = NULL;
-               } else if (ret > 0 && ext4_test_inode_state(inode,
-                                               EXT4_STATE_DIO_UNWRITTEN)) {
+               } else if (ret > 0 && (EXT4_I(inode)->i_state &
+                                      EXT4_STATE_DIO_UNWRITTEN)) {
                        int err;
                        /*
                         * for non AIO case, since the IO is already
@@ -3856,7 +3788,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                                                             offset, ret);
                        if (err < 0)
                                ret = err;
-                       ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
+                       EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
                }
                return ret;
        }
@@ -3872,7 +3804,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 
-       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
                return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
 
        return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
@@ -4503,12 +4435,10 @@ void ext4_truncate(struct inode *inode)
        if (!ext4_can_truncate(inode))
                return;
 
-       ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
-
        if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
-               ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
+               ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
 
-       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                ext4_ext_truncate(inode);
                return;
        }
@@ -4792,7 +4722,7 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
 {
        /* We have all inode data except xattrs in memory here. */
        return __ext4_get_inode_loc(inode, iloc,
-               !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
+               !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
 }
 
 void ext4_set_inode_flags(struct inode *inode)
@@ -4815,26 +4745,20 @@ void ext4_set_inode_flags(struct inode *inode)
 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
 void ext4_get_inode_flags(struct ext4_inode_info *ei)
 {
-       unsigned int vfs_fl;
-       unsigned long old_fl, new_fl;
-
-       do {
-               vfs_fl = ei->vfs_inode.i_flags;
-               old_fl = ei->i_flags;
-               new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
-                               EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
-                               EXT4_DIRSYNC_FL);
-               if (vfs_fl & S_SYNC)
-                       new_fl |= EXT4_SYNC_FL;
-               if (vfs_fl & S_APPEND)
-                       new_fl |= EXT4_APPEND_FL;
-               if (vfs_fl & S_IMMUTABLE)
-                       new_fl |= EXT4_IMMUTABLE_FL;
-               if (vfs_fl & S_NOATIME)
-                       new_fl |= EXT4_NOATIME_FL;
-               if (vfs_fl & S_DIRSYNC)
-                       new_fl |= EXT4_DIRSYNC_FL;
-       } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
+       unsigned int flags = ei->vfs_inode.i_flags;
+
+       ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
+                       EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
+       if (flags & S_SYNC)
+               ei->i_flags |= EXT4_SYNC_FL;
+       if (flags & S_APPEND)
+               ei->i_flags |= EXT4_APPEND_FL;
+       if (flags & S_IMMUTABLE)
+               ei->i_flags |= EXT4_IMMUTABLE_FL;
+       if (flags & S_NOATIME)
+               ei->i_flags |= EXT4_NOATIME_FL;
+       if (flags & S_DIRSYNC)
+               ei->i_flags |= EXT4_DIRSYNC_FL;
 }
 
 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
@@ -4892,7 +4816,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        }
        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 
-       ei->i_state_flags = 0;
+       ei->i_state = 0;
        ei->i_dir_start_lookup = 0;
        ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
        /* We now have enough fields to check if the inode was active or not.
@@ -4975,7 +4899,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                                        EXT4_GOOD_OLD_INODE_SIZE +
                                        ei->i_extra_isize;
                        if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
-                               ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+                               ei->i_state |= EXT4_STATE_XATTR;
                }
        } else
                ei->i_extra_isize = 0;
@@ -5073,7 +4997,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
                 */
                raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
                raw_inode->i_blocks_high = 0;
-               ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
+               ei->i_flags &= ~EXT4_HUGE_FILE_FL;
                return 0;
        }
        if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
@@ -5086,9 +5010,9 @@ static int ext4_inode_blocks_set(handle_t *handle,
                 */
                raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
                raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
-               ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
+               ei->i_flags &= ~EXT4_HUGE_FILE_FL;
        } else {
-               ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
+               ei->i_flags |= EXT4_HUGE_FILE_FL;
                /* i_block is stored in file system block size */
                i_blocks = i_blocks >> (inode->i_blkbits - 9);
                raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
@@ -5115,7 +5039,7 @@ static int ext4_do_update_inode(handle_t *handle,
 
        /* For fields not not tracking in the in-memory inode,
         * initialise them to zero for new inodes. */
-       if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
+       if (ei->i_state & EXT4_STATE_NEW)
                memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
 
        ext4_get_inode_flags(ei);
@@ -5179,7 +5103,7 @@ static int ext4_do_update_inode(handle_t *handle,
                                        EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
                        sb->s_dirt = 1;
                        ext4_handle_sync(handle);
-                       err = ext4_handle_dirty_metadata(handle, NULL,
+                       err = ext4_handle_dirty_metadata(handle, inode,
                                        EXT4_SB(sb)->s_sbh);
                }
        }
@@ -5208,10 +5132,10 @@ static int ext4_do_update_inode(handle_t *handle,
        }
 
        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-       rc = ext4_handle_dirty_metadata(handle, NULL, bh);
+       rc = ext4_handle_dirty_metadata(handle, inode, bh);
        if (!err)
                err = rc;
-       ext4_clear_inode_state(inode, EXT4_STATE_NEW);
+       ei->i_state &= ~EXT4_STATE_NEW;
 
        ext4_update_inode_fsync_trans(handle, inode, 0);
 out_brelse:
@@ -5276,7 +5200,7 @@ int ext4_write_inode(struct inode *inode, int wait)
        } else {
                struct ext4_iloc iloc;
 
-               err = __ext4_get_inode_loc(inode, &iloc, 0);
+               err = ext4_get_inode_loc(inode, &iloc);
                if (err)
                        return err;
                if (wait)
@@ -5289,7 +5213,6 @@ int ext4_write_inode(struct inode *inode, int wait)
                                   (unsigned long long)iloc.bh->b_blocknr);
                        err = -EIO;
                }
-               brelse(iloc.bh);
        }
        return err;
 }
@@ -5356,7 +5279,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        if (attr->ia_valid & ATTR_SIZE) {
-               if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+               if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
                        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 
                        if (attr->ia_size > sbi->s_bitmap_maxbytes) {
@@ -5367,9 +5290,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        if (S_ISREG(inode->i_mode) &&
-           attr->ia_valid & ATTR_SIZE &&
-           (attr->ia_size < inode->i_size ||
-            (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
+           attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
                handle_t *handle;
 
                handle = ext4_journal_start(inode, 3);
@@ -5400,9 +5321,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                                goto err_out;
                        }
                }
-               /* ext4_truncate will clear the flag */
-               if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
-                       ext4_truncate(inode);
        }
 
        rc = inode_setattr(inode, attr);
@@ -5477,7 +5395,7 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
 
 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
 {
-       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+       if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
                return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
        return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
 }
@@ -5641,8 +5559,8 @@ static int ext4_expand_extra_isize(struct inode *inode,
        entry = IFIRST(header);
 
        /* No extended attributes present */
-       if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
-           header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+       if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
+               header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
                memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
                        new_extra_isize);
                EXT4_I(inode)->i_extra_isize = new_extra_isize;
@@ -5686,7 +5604,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (ext4_handle_valid(handle) &&
            EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
-           !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
+           !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
                /*
                 * We need extra buffer credits since we may write into EA block
                 * with this same handle. If journal_extend fails, then it will
@@ -5700,8 +5618,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
                                                      sbi->s_want_extra_isize,
                                                      iloc, handle);
                        if (ret) {
-                               ext4_set_inode_state(inode,
-                                                    EXT4_STATE_NO_EXPAND);
+                               EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
                                if (mnt_count !=
                                        le16_to_cpu(sbi->s_es->s_mnt_count)) {
                                        ext4_warning(inode->i_sb, __func__,
@@ -5768,7 +5685,7 @@ static int ext4_pin_inode(handle_t *handle, struct inode *inode)
                        err = jbd2_journal_get_write_access(handle, iloc.bh);
                        if (!err)
                                err = ext4_handle_dirty_metadata(handle,
-                                                                NULL,
+                                                                inode,
                                                                 iloc.bh);
                        brelse(iloc.bh);
                }
@@ -5812,9 +5729,9 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
         */
 
        if (val)
-               ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
+               EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
        else
-               ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
+               EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
        ext4_set_aops(inode);
 
        jbd2_journal_unlock_updates(journal);
index bf5ae883b1bdc30ff55cbc616bab7c24f9d57dbb..b63d193126dbf759236a52f075af914bc921d40a 100644 (file)
@@ -92,15 +92,6 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        flags &= ~EXT4_EXTENTS_FL;
                }
 
-               if (flags & EXT4_EOFBLOCKS_FL) {
-                       /* we don't support adding EOFBLOCKS flag */
-                       if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
-                               err = -EOPNOTSUPP;
-                               goto flags_out;
-                       }
-               } else if (oldflags & EXT4_EOFBLOCKS_FL)
-                       ext4_truncate(inode);
-
                handle = ext4_journal_start(inode, 1);
                if (IS_ERR(handle)) {
                        err = PTR_ERR(handle);
@@ -258,8 +249,7 @@ setversion_out:
                if (me.moved_len > 0)
                        file_remove_suid(donor_filp);
 
-               if (copy_to_user((struct move_extent __user *)arg,
-                                &me, sizeof(me)))
+               if (copy_to_user((struct move_extent *)arg, &me, sizeof(me)))
                        err = -EFAULT;
 mext_out:
                fput(donor_filp);
@@ -373,30 +363,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case EXT4_IOC32_SETRSVSZ:
                cmd = EXT4_IOC_SETRSVSZ;
                break;
-       case EXT4_IOC32_GROUP_ADD: {
-               struct compat_ext4_new_group_input __user *uinput;
-               struct ext4_new_group_input input;
-               mm_segment_t old_fs;
-               int err;
-
-               uinput = compat_ptr(arg);
-               err = get_user(input.group, &uinput->group);
-               err |= get_user(input.block_bitmap, &uinput->block_bitmap);
-               err |= get_user(input.inode_bitmap, &uinput->inode_bitmap);
-               err |= get_user(input.inode_table, &uinput->inode_table);
-               err |= get_user(input.blocks_count, &uinput->blocks_count);
-               err |= get_user(input.reserved_blocks,
-                               &uinput->reserved_blocks);
-               if (err)
-                       return -EFAULT;
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               err = ext4_ioctl(file, EXT4_IOC_GROUP_ADD,
-                                (unsigned long) &input);
-               set_fs(old_fs);
-               return err;
-       }
-       case EXT4_IOC_MOVE_EXT:
+       case EXT4_IOC_GROUP_ADD:
                break;
        default:
                return -ENOIOCTLCMD;
index 04e07e227278d0bc13b22fda9e5e9a04107e5b7b..7d7114818f8dfe7ec709cd1ac124eccc34ffbc6b 100644 (file)
@@ -658,27 +658,6 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
        }
 }
 
-/*
- * Cache the order of the largest free extent we have available in this block
- * group.
- */
-static void
-mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
-{
-       int i;
-       int bits;
-
-       grp->bb_largest_free_order = -1; /* uninit */
-
-       bits = sb->s_blocksize_bits + 1;
-       for (i = bits; i >= 0; i--) {
-               if (grp->bb_counters[i] > 0) {
-                       grp->bb_largest_free_order = i;
-                       break;
-               }
-       }
-}
-
 static noinline_for_stack
 void ext4_mb_generate_buddy(struct super_block *sb,
                                void *buddy, void *bitmap, ext4_group_t group)
@@ -721,7 +700,6 @@ void ext4_mb_generate_buddy(struct super_block *sb,
                 */
                grp->bb_free = free;
        }
-       mb_set_largest_free_order(sb, grp);
 
        clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
 
@@ -747,9 +725,6 @@ void ext4_mb_generate_buddy(struct super_block *sb,
  * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
  * So it can have information regarding groups_per_page which
  * is blocks_per_page/2
- *
- * Locking note:  This routine takes the block group lock of all groups
- * for this page; do not hold this lock when calling this routine!
  */
 
 static int ext4_mb_init_cache(struct page *page, char *incore)
@@ -935,11 +910,6 @@ out:
        return err;
 }
 
-/*
- * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
- * block group lock of all groups for this page; do not hold the BG lock when
- * calling this routine!
- */
 static noinline_for_stack
 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
 {
@@ -1034,11 +1004,6 @@ err:
        return ret;
 }
 
-/*
- * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
- * block group lock of all groups for this page; do not hold the BG lock when
- * calling this routine!
- */
 static noinline_for_stack int
 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
                                        struct ext4_buddy *e4b)
@@ -1185,7 +1150,7 @@ err:
        return ret;
 }
 
-static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+static void ext4_mb_release_desc(struct ext4_buddy *e4b)
 {
        if (e4b->bd_bitmap_page)
                page_cache_release(e4b->bd_bitmap_page);
@@ -1335,7 +1300,6 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
                        buddy = buddy2;
                } while (1);
        }
-       mb_set_largest_free_order(sb, e4b->bd_info);
        mb_check_buddy(e4b);
 }
 
@@ -1464,7 +1428,6 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
                e4b->bd_info->bb_counters[ord]++;
                e4b->bd_info->bb_counters[ord]++;
        }
-       mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
 
        mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
        mb_check_buddy(e4b);
@@ -1655,7 +1618,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
        }
 
        ext4_unlock_group(ac->ac_sb, group);
-       ext4_mb_unload_buddy(e4b);
+       ext4_mb_release_desc(e4b);
 
        return 0;
 }
@@ -1711,7 +1674,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
                ext4_mb_use_best_found(ac, e4b);
        }
        ext4_unlock_group(ac->ac_sb, group);
-       ext4_mb_unload_buddy(e4b);
+       ext4_mb_release_desc(e4b);
 
        return 0;
 }
@@ -1860,22 +1823,16 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
        }
 }
 
-/* This is now called BEFORE we load the buddy bitmap. */
 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
                                ext4_group_t group, int cr)
 {
        unsigned free, fragments;
+       unsigned i, bits;
        int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
        struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
 
        BUG_ON(cr < 0 || cr >= 4);
-
-       /* We only do this if the grp has never been initialized */
-       if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
-               int ret = ext4_mb_init_group(ac->ac_sb, group);
-               if (ret)
-                       return 0;
-       }
+       BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
 
        free = grp->bb_free;
        fragments = grp->bb_fragments;
@@ -1888,16 +1845,17 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
        case 0:
                BUG_ON(ac->ac_2order == 0);
 
-               if (grp->bb_largest_free_order < ac->ac_2order)
-                       return 0;
-
                /* Avoid using the first bg of a flexgroup for data files */
                if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
                    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
                    ((group % flex_size) == 0))
                        return 0;
 
-               return 1;
+               bits = ac->ac_sb->s_blocksize_bits + 1;
+               for (i = ac->ac_2order; i <= bits; i++)
+                       if (grp->bb_counters[i] > 0)
+                               return 1;
+               break;
        case 1:
                if ((free / fragments) >= ac->ac_g_ex.fe_len)
                        return 1;
@@ -2008,7 +1966,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
        sbi = EXT4_SB(sb);
        ngroups = ext4_get_groups_count(sb);
        /* non-extent files are limited to low blocks/groups */
-       if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
+       if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL))
                ngroups = sbi->s_blockfile_groups;
 
        BUG_ON(ac->ac_status == AC_STATUS_FOUND);
@@ -2068,11 +2026,15 @@ repeat:
                group = ac->ac_g_ex.fe_group;
 
                for (i = 0; i < ngroups; group++, i++) {
+                       struct ext4_group_info *grp;
+                       struct ext4_group_desc *desc;
+
                        if (group == ngroups)
                                group = 0;
 
-                       /* This now checks without needing the buddy page */
-                       if (!ext4_mb_good_group(ac, group, cr))
+                       /* quick check to skip empty groups */
+                       grp = ext4_get_group_info(sb, group);
+                       if (grp->bb_free == 0)
                                continue;
 
                        err = ext4_mb_load_buddy(sb, group, &e4b);
@@ -2080,18 +2042,15 @@ repeat:
                                goto out;
 
                        ext4_lock_group(sb, group);
-
-                       /*
-                        * We need to check again after locking the
-                        * block group
-                        */
                        if (!ext4_mb_good_group(ac, group, cr)) {
+                               /* someone did allocation from this group */
                                ext4_unlock_group(sb, group);
-                               ext4_mb_unload_buddy(&e4b);
+                               ext4_mb_release_desc(&e4b);
                                continue;
                        }
 
                        ac->ac_groups_scanned++;
+                       desc = ext4_get_group_desc(sb, group, NULL);
                        if (cr == 0)
                                ext4_mb_simple_scan_group(ac, &e4b);
                        else if (cr == 1 &&
@@ -2101,7 +2060,7 @@ repeat:
                                ext4_mb_complex_scan_group(ac, &e4b);
 
                        ext4_unlock_group(sb, group);
-                       ext4_mb_unload_buddy(&e4b);
+                       ext4_mb_release_desc(&e4b);
 
                        if (ac->ac_status != AC_STATUS_CONTINUE)
                                break;
@@ -2191,7 +2150,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
        ext4_lock_group(sb, group);
        memcpy(&sg, ext4_get_group_info(sb, group), i);
        ext4_unlock_group(sb, group);
-       ext4_mb_unload_buddy(&e4b);
+       ext4_mb_release_desc(&e4b);
 
        seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
                        sg.info.bb_fragments, sg.info.bb_first_free);
@@ -2298,7 +2257,6 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
        INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
        init_rwsem(&meta_group_info[i]->alloc_sem);
        meta_group_info[i]->bb_free_root.rb_node = NULL;
-       meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
 
 #ifdef DOUBLE_CHECK
        {
@@ -2579,23 +2537,6 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
                mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
                         entry->count, entry->group, entry);
 
-               if (test_opt(sb, DISCARD)) {
-                       int ret;
-                       ext4_fsblk_t discard_block;
-
-                       discard_block = entry->start_blk +
-                               ext4_group_first_block_no(sb, entry->group);
-                       trace_ext4_discard_blocks(sb,
-                                       (unsigned long long)discard_block,
-                                       entry->count);
-                       ret = sb_issue_discard(sb, discard_block, entry->count);
-                       if (ret == EOPNOTSUPP) {
-                               ext4_warning(sb, __func__,
-                                       "discard not supported, disabling");
-                               clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
-                       }
-               }
-
                err = ext4_mb_load_buddy(sb, entry->group, &e4b);
                /* we expect to find existing buddy because it's pinned */
                BUG_ON(err != 0);
@@ -2617,8 +2558,21 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
                        page_cache_release(e4b.bd_bitmap_page);
                }
                ext4_unlock_group(sb, entry->group);
+               if (test_opt(sb, DISCARD)) {
+                       ext4_fsblk_t discard_block;
+                       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+                       discard_block = (ext4_fsblk_t)entry->group *
+                                               EXT4_BLOCKS_PER_GROUP(sb)
+                                       + entry->start_blk
+                                       + le32_to_cpu(es->s_first_data_block);
+                       trace_ext4_discard_blocks(sb,
+                                       (unsigned long long)discard_block,
+                                       entry->count);
+                       sb_issue_discard(sb, discard_block, entry->count);
+               }
                kmem_cache_free(ext4_free_ext_cachep, entry);
-               ext4_mb_unload_buddy(&e4b);
+               ext4_mb_release_desc(&e4b);
        }
 
        mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
@@ -2801,6 +2755,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
        if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
                /* release all the reserved blocks if non delalloc */
                percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
+       else {
+               percpu_counter_sub(&sbi->s_dirtyblocks_counter,
+                                               ac->ac_b_ex.fe_len);
+               /* convert reserved quota blocks to real quota blocks */
+               vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
+       }
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi,
@@ -3176,7 +3136,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
                        continue;
 
                /* non-extent files can't have physical blocks past 2^32 */
-               if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
+               if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL) &&
                        pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS)
                        continue;
 
@@ -3755,7 +3715,7 @@ out:
        ext4_unlock_group(sb, group);
        if (ac)
                kmem_cache_free(ext4_ac_cachep, ac);
-       ext4_mb_unload_buddy(&e4b);
+       ext4_mb_release_desc(&e4b);
        put_bh(bitmap_bh);
        return free;
 }
@@ -3859,7 +3819,7 @@ repeat:
                if (bitmap_bh == NULL) {
                        ext4_error(sb, __func__, "Error in reading block "
                                        "bitmap for %u", group);
-                       ext4_mb_unload_buddy(&e4b);
+                       ext4_mb_release_desc(&e4b);
                        continue;
                }
 
@@ -3868,7 +3828,7 @@ repeat:
                ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
                ext4_unlock_group(sb, group);
 
-               ext4_mb_unload_buddy(&e4b);
+               ext4_mb_release_desc(&e4b);
                put_bh(bitmap_bh);
 
                list_del(&pa->u.pa_tmp_list);
@@ -3984,7 +3944,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
 
        /* don't use group allocation for large files */
        size = max(size, isize);
-       if (size > sbi->s_mb_stream_request) {
+       if (size >= sbi->s_mb_stream_request) {
                ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
                return;
        }
@@ -4132,7 +4092,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
                ext4_mb_release_group_pa(&e4b, pa, ac);
                ext4_unlock_group(sb, group);
 
-               ext4_mb_unload_buddy(&e4b);
+               ext4_mb_release_desc(&e4b);
                list_del(&pa->u.pa_tmp_list);
                call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
        }
@@ -4634,7 +4594,7 @@ do_more:
                atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
        }
 
-       ext4_mb_unload_buddy(&e4b);
+       ext4_mb_release_desc(&e4b);
 
        *freed += count;
 
index ceb9d41cd7cf4c01a391f60038bd4fa9b9a0314a..0ca811061bc72eca18b46ca025b660f32ce87138 100644 (file)
@@ -221,6 +221,8 @@ struct ext4_buddy {
 #define EXT4_MB_BITMAP(e4b)    ((e4b)->bd_bitmap)
 #define EXT4_MB_BUDDY(e4b)     ((e4b)->bd_buddy)
 
+#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
+
 static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
                                        struct ext4_free_extent *fex)
 {
index 7901f133f967cad48e99718d6afcc71665050bc6..864614974536f094e200d4dea099391548b07913 100644 (file)
@@ -357,12 +357,12 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
         * happened after we started the migrate. We need to
         * fail the migrate
         */
-       if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
+       if (!(EXT4_I(inode)->i_state & EXT4_STATE_EXT_MIGRATE)) {
                retval = -EAGAIN;
                up_write(&EXT4_I(inode)->i_data_sem);
                goto err_out;
        } else
-               ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
        /*
         * We have the extent map build with the tmp inode.
         * Now copy the i_data across
@@ -465,7 +465,7 @@ int ext4_ext_migrate(struct inode *inode)
         */
        if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
                                       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
-           (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+           (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
                return -EINVAL;
 
        if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
@@ -494,10 +494,14 @@ int ext4_ext_migrate(struct inode *inode)
        }
        i_size_write(tmp_inode, i_size_read(inode));
        /*
-        * Set the i_nlink to zero so it will be deleted later
-        * when we drop inode reference.
+        * We don't want the inode to be reclaimed
+        * if we got interrupted in between. We have
+        * this tmp inode carrying reference to the
+        * data blocks of the original file. We set
+        * the i_nlink to zero at the last stage after
+        * switching the original file to extent format
         */
-       tmp_inode->i_nlink = 0;
+       tmp_inode->i_nlink = 1;
 
        ext4_ext_tree_init(handle, tmp_inode);
        ext4_orphan_add(handle, tmp_inode);
@@ -520,20 +524,10 @@ int ext4_ext_migrate(struct inode *inode)
         * allocation.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
-       ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
+       EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE;
        up_read((&EXT4_I(inode)->i_data_sem));
 
        handle = ext4_journal_start(inode, 1);
-       if (IS_ERR(handle)) {
-               /*
-                * It is impossible to update on-disk structures without
-                * a handle, so just rollback in-core changes and live other
-                * work to orphan_list_cleanup()
-                */
-               ext4_orphan_del(NULL, tmp_inode);
-               retval = PTR_ERR(handle);
-               goto out;
-       }
 
        ei = EXT4_I(inode);
        i_data = ei->i_data;
@@ -615,8 +609,15 @@ err_out:
 
        /* Reset the extent details */
        ext4_ext_tree_init(handle, tmp_inode);
+
+       /*
+        * Set the i_nlink to zero so that
+        * generic_drop_inode really deletes the
+        * inode
+        */
+       tmp_inode->i_nlink = 0;
+
        ext4_journal_stop(handle);
-out:
        unlock_new_inode(tmp_inode);
        iput(tmp_inode);
 
index a73ed781752dd689d1312399f33a4f939fdd646e..f5b03a132a2ac1ff195a9c46d8e1427a8f0eac79 100644 (file)
@@ -252,7 +252,6 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
                }
 
                o_start->ee_len = start_ext->ee_len;
-               eblock = le32_to_cpu(start_ext->ee_block);
                new_flag = 1;
 
        } else if (start_ext->ee_len && new_ext->ee_len &&
@@ -263,7 +262,6 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
                 * orig  |------------------------------|
                 */
                o_start->ee_len = start_ext->ee_len;
-               eblock = le32_to_cpu(start_ext->ee_block);
                new_flag = 1;
 
        } else if (!start_ext->ee_len && new_ext->ee_len &&
@@ -477,6 +475,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
        struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
        struct ext4_extent new_ext, start_ext, end_ext;
        ext4_lblk_t new_ext_end;
+       ext4_fsblk_t new_phys_end;
        int oext_alen, new_ext_alen, end_ext_alen;
        int depth = ext_depth(orig_inode);
        int ret;
@@ -490,6 +489,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
        new_ext.ee_len = dext->ee_len;
        new_ext_alen = ext4_ext_get_actual_len(&new_ext);
        new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
+       new_phys_end = ext_pblock(&new_ext) + new_ext_alen - 1;
 
        /*
         * Case: original extent is first
@@ -502,7 +502,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
                le32_to_cpu(oext->ee_block) + oext_alen) {
                start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
                                               le32_to_cpu(oext->ee_block));
-               start_ext.ee_block = oext->ee_block;
                copy_extent_status(oext, &start_ext);
        } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
                prev_ext = oext - 1;
@@ -516,7 +515,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
                        start_ext.ee_len = cpu_to_le16(
                                ext4_ext_get_actual_len(prev_ext) +
                                new_ext_alen);
-                       start_ext.ee_block = oext->ee_block;
                        copy_extent_status(prev_ext, &start_ext);
                        new_ext.ee_len = 0;
                }
@@ -930,7 +928,7 @@ out2:
 }
 
 /**
- * mext_check_arguments - Check whether move extent can be done
+ * mext_check_argumants - Check whether move extent can be done
  *
  * @orig_inode:                original inode
  * @donor_inode:       donor inode
@@ -951,6 +949,14 @@ mext_check_arguments(struct inode *orig_inode,
        unsigned int blkbits = orig_inode->i_blkbits;
        unsigned int blocksize = 1 << blkbits;
 
+       /* Regular file check */
+       if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
+               ext4_debug("ext4 move extent: The argument files should be "
+                       "regular file [ino:orig %lu, donor %lu]\n",
+                       orig_inode->i_ino, donor_inode->i_ino);
+               return -EINVAL;
+       }
+
        if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
                ext4_debug("ext4 move extent: suid or sgid is set"
                           " to donor file [ino:orig %lu, donor %lu]\n",
@@ -958,9 +964,6 @@ mext_check_arguments(struct inode *orig_inode,
                return -EINVAL;
        }
 
-       if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
-               return -EPERM;
-
        /* Ext4 move extent does not support swapfile */
        if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
                ext4_debug("ext4 move extent: The argument files should "
@@ -978,11 +981,11 @@ mext_check_arguments(struct inode *orig_inode,
        }
 
        /* Ext4 move extent supports only extent based file */
-       if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
+       if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) {
                ext4_debug("ext4 move extent: orig file is not extents "
                        "based file [ino:orig %lu]\n", orig_inode->i_ino);
                return -EOPNOTSUPP;
-       } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
+       } else if (!(EXT4_I(donor_inode)->i_flags & EXT4_EXTENTS_FL)) {
                ext4_debug("ext4 move extent: donor file is not extents "
                        "based file [ino:donor %lu]\n", donor_inode->i_ino);
                return -EOPNOTSUPP;
@@ -1201,14 +1204,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
                return -EINVAL;
        }
 
-       /* Regular file check */
-       if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
-               ext4_debug("ext4 move extent: The argument files should be "
-                       "regular file [ino:orig %lu, donor %lu]\n",
-                       orig_inode->i_ino, donor_inode->i_ino);
-               return -EINVAL;
-       }
-
        /* Protect orig and donor inodes against a truncate */
        ret1 = mext_inode_double_lock(orig_inode, donor_inode);
        if (ret1 < 0)
index c3b6ad0fcc46628e978869f80d5ef343b904d0d5..17a17e10dd605198ddfe2dfed530d4cfff6812c1 100644 (file)
@@ -660,7 +660,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
        dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", 
                       start_hash, start_minor_hash));
        dir = dir_file->f_path.dentry->d_inode;
-       if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
+       if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
                hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
                if (hinfo.hash_version <= DX_HASH_TEA)
                        hinfo.hash_version +=
@@ -805,7 +805,7 @@ static void ext4_update_dx_flag(struct inode *inode)
 {
        if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
                                     EXT4_FEATURE_COMPAT_DIR_INDEX))
-               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+               EXT4_I(inode)->i_flags &= ~EXT4_INDEX_FL;
 }
 
 /*
@@ -1424,7 +1424,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
                brelse(bh);
                return retval;
        }
-       ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
+       EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
        data1 = bh2->b_data;
 
        memcpy (data1, de, len);
@@ -1497,7 +1497,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
                retval = ext4_dx_add_entry(handle, dentry, inode);
                if (!retval || (retval != ERR_BAD_DX_DIR))
                        return retval;
-               ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+               EXT4_I(dir)->i_flags &= ~EXT4_INDEX_FL;
                dx_fallback++;
                ext4_mark_inode_dirty(handle, dir);
        }
@@ -1525,8 +1525,6 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
        de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
        retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
        brelse(bh);
-       if (retval == 0)
-               ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
        return retval;
 }
 
@@ -2022,18 +2020,11 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (err)
                goto out_unlock;
-       /*
-        * Due to previous errors inode may be already a part of on-disk
-        * orphan list. If so skip on-disk list modification.
-        */
-       if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
-               (le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
-                       goto mem_insert;
 
        /* Insert this inode at the head of the on-disk orphan list... */
        NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
        EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
-       err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+       err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh);
        rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
        if (!err)
                err = rc;
@@ -2046,7 +2037,6 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
         *
         * This is safe: on error we're going to ignore the orphan list
         * anyway on the next recovery. */
-mem_insert:
        if (!err)
                list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
 
@@ -2106,7 +2096,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
                if (err)
                        goto out_brelse;
                sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
-               err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
+               err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh);
        } else {
                struct ext4_iloc iloc2;
                struct inode *i_prev =
@@ -2294,7 +2284,7 @@ retry:
                }
        } else {
                /* clear the extent format for fast symlink */
-               ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
+               EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
                inode->i_op = &ext4_fast_symlink_inode_operations;
                memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
                inode->i_size = l-1;
index 433ea27e265299389baddbe900b5bd4ce10b70b9..3b2c5541d8a686fe7c780a512c0acd3e6771b3ab 100644 (file)
@@ -930,8 +930,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
        percpu_counter_add(&sbi->s_freeinodes_counter,
                           EXT4_INODES_PER_GROUP(sb));
 
-       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
-           sbi->s_log_groups_per_flex) {
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
                ext4_group_t flex_group;
                flex_group = ext4_flex_group(sbi, input->group);
                atomic_add(input->free_blocks_count,
index f27e045df7a6e502c9d6d92389a8062eeb8a0e24..92943f2ca2abcb036f8cad4681d0124ca44d78a0 100644 (file)
@@ -227,7 +227,6 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
        if (sb->s_flags & MS_RDONLY)
                return ERR_PTR(-EROFS);
 
-       vfs_check_frozen(sb, SB_FREEZE_TRANS);
        /* Special case here: if the journal has aborted behind our
         * backs (eg. EIO in the commit thread), then we still need to
         * take the FS itself readonly cleanly. */
@@ -703,7 +702,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        ei->i_reserved_data_blocks = 0;
        ei->i_reserved_meta_blocks = 0;
        ei->i_allocated_meta_blocks = 0;
-       ei->i_da_metadata_calc_len = 0;
        ei->i_delalloc_reserved_flag = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
 #ifdef CONFIG_QUOTA
@@ -877,8 +875,6 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
        seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
        if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
                seq_puts(seq, ",journal_async_commit");
-       else if (test_opt(sb, JOURNAL_CHECKSUM))
-               seq_puts(seq, ",journal_checksum");
        if (test_opt(sb, NOBH))
                seq_puts(seq, ",nobh");
        if (test_opt(sb, I_VERSION))
@@ -1218,11 +1214,6 @@ static int parse_options(char *options, struct super_block *sb,
                if (!*p)
                        continue;
 
-               /*
-                * Initialize args struct so we know whether arg was
-                * found; some options take optional arguments.
-                */
-               args[0].to = args[0].from = 0;
                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_bsd_df:
@@ -1508,11 +1499,10 @@ set_qf_format:
                        clear_opt(sbi->s_mount_opt, BARRIER);
                        break;
                case Opt_barrier:
-                       if (args[0].from) {
-                               if (match_int(&args[0], &option))
-                                       return 0;
-                       } else
-                               option = 1;     /* No argument, default to 1 */
+                       if (match_int(&args[0], &option)) {
+                               set_opt(sbi->s_mount_opt, BARRIER);
+                               break;
+                       }
                        if (option)
                                set_opt(sbi->s_mount_opt, BARRIER);
                        else
@@ -1585,11 +1575,10 @@ set_qf_format:
                        set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
                        break;
                case Opt_auto_da_alloc:
-                       if (args[0].from) {
-                               if (match_int(&args[0], &option))
-                                       return 0;
-                       } else
-                               option = 1;     /* No argument, default to 1 */
+                       if (match_int(&args[0], &option)) {
+                               clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
+                               break;
+                       }
                        if (option)
                                clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
                        else
@@ -2704,6 +2693,24 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
+       err = percpu_counter_init(&sbi->s_freeblocks_counter,
+                       ext4_count_free_blocks(sb));
+       if (!err) {
+               err = percpu_counter_init(&sbi->s_freeinodes_counter,
+                               ext4_count_free_inodes(sb));
+       }
+       if (!err) {
+               err = percpu_counter_init(&sbi->s_dirs_counter,
+                               ext4_count_dirs(sb));
+       }
+       if (!err) {
+               err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
+       }
+       if (err) {
+               ext4_msg(sb, KERN_ERR, "insufficient memory");
+               goto failed_mount3;
+       }
+
        sbi->s_stripe = ext4_get_stripe_size(sbi);
        sbi->s_max_writeback_mb_bump = 128;
 
@@ -2823,20 +2830,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
 
 no_journal:
-       err = percpu_counter_init(&sbi->s_freeblocks_counter,
-                                 ext4_count_free_blocks(sb));
-       if (!err)
-               err = percpu_counter_init(&sbi->s_freeinodes_counter,
-                                         ext4_count_free_inodes(sb));
-       if (!err)
-               err = percpu_counter_init(&sbi->s_dirs_counter,
-                                         ext4_count_dirs(sb));
-       if (!err)
-               err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
-       if (err) {
-               ext4_msg(sb, KERN_ERR, "insufficient memory");
-               goto failed_mount_wq;
-       }
+
        if (test_opt(sb, NOBH)) {
                if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
                        ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
@@ -2911,7 +2905,7 @@ no_journal:
        err = ext4_setup_system_zone(sb);
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to initialize system "
-                        "zone (%d)", err);
+                        "zone (%d)\n", err);
                goto failed_mount4;
        }
 
@@ -2969,10 +2963,6 @@ failed_mount_wq:
                jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
        }
-       percpu_counter_destroy(&sbi->s_freeblocks_counter);
-       percpu_counter_destroy(&sbi->s_freeinodes_counter);
-       percpu_counter_destroy(&sbi->s_dirs_counter);
-       percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
 failed_mount3:
        if (sbi->s_flex_groups) {
                if (is_vmalloc_addr(sbi->s_flex_groups))
@@ -2980,6 +2970,10 @@ failed_mount3:
                else
                        kfree(sbi->s_flex_groups);
        }
+       percpu_counter_destroy(&sbi->s_freeblocks_counter);
+       percpu_counter_destroy(&sbi->s_freeinodes_counter);
+       percpu_counter_destroy(&sbi->s_dirs_counter);
+       percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
 failed_mount2:
        for (i = 0; i < db_count; i++)
                brelse(sbi->s_group_desc[i]);
@@ -3396,10 +3390,8 @@ int ext4_force_commit(struct super_block *sb)
                return 0;
 
        journal = EXT4_SB(sb)->s_journal;
-       if (journal) {
-               vfs_check_frozen(sb, SB_FREEZE_TRANS);
+       if (journal)
                ret = ext4_journal_force_commit(journal);
-       }
 
        return ret;
 }
@@ -3448,16 +3440,18 @@ static int ext4_freeze(struct super_block *sb)
         * the journal.
         */
        error = jbd2_journal_flush(journal);
-       if (error < 0)
-               goto out;
+       if (error < 0) {
+       out:
+               jbd2_journal_unlock_updates(journal);
+               return error;
+       }
 
        /* Journal blocked and flushed, clear needs_recovery flag. */
        EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
        error = ext4_commit_super(sb, 1);
-out:
-       /* we rely on s_frozen to stop further updates */
-       jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
-       return error;
+       if (error)
+               goto out;
+       return 0;
 }
 
 /*
@@ -3474,6 +3468,7 @@ static int ext4_unfreeze(struct super_block *sb)
        EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
        ext4_commit_super(sb, 1);
        unlock_super(sb);
+       jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
        return 0;
 }
 
@@ -4006,7 +4001,6 @@ static int __init init_ext4_fs(void)
 {
        int err;
 
-       ext4_check_flag_values();
        err = init_ext4_system_zone();
        if (err)
                return err;
index 4de7d0a75c427b385bafcbb120127ce88532895e..025701926f9aed63cee73c530ff718a41bdd3946 100644 (file)
@@ -267,7 +267,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
        void *end;
        int error;
 
-       if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
+       if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
                return -ENODATA;
        error = ext4_get_inode_loc(inode, &iloc);
        if (error)
@@ -393,7 +393,7 @@ ext4_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
        void *end;
        int error;
 
-       if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
+       if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
                return 0;
        error = ext4_get_inode_loc(inode, &iloc);
        if (error)
@@ -816,7 +816,7 @@ inserted:
                                                EXT4_I(inode)->i_block_group);
 
                        /* non-extent files can't have physical blocks past 2^32 */
-                       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+                       if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
                                goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
 
                        block = ext4_new_meta_blocks(handle, inode,
@@ -824,7 +824,7 @@ inserted:
                        if (error)
                                goto cleanup;
 
-                       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+                       if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
                                BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
 
                        ea_idebug(inode, "creating block %d", block);
@@ -903,7 +903,7 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
        is->s.base = is->s.first = IFIRST(header);
        is->s.here = is->s.first;
        is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-       if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
+       if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
                error = ext4_xattr_check_names(IFIRST(header), is->s.end);
                if (error)
                        return error;
@@ -935,10 +935,10 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
        header = IHDR(inode, ext4_raw_inode(&is->iloc));
        if (!IS_LAST_ENTRY(s->first)) {
                header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
-               ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+               EXT4_I(inode)->i_state |= EXT4_STATE_XATTR;
        } else {
                header->h_magic = cpu_to_le32(0);
-               ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR;
        }
        return 0;
 }
@@ -981,8 +981,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
        if (strlen(name) > 255)
                return -ERANGE;
        down_write(&EXT4_I(inode)->xattr_sem);
-       no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
-       ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
+       no_expand = EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND;
+       EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
 
        error = ext4_get_inode_loc(inode, &is.iloc);
        if (error)
@@ -992,10 +992,10 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
        if (error)
                goto cleanup;
 
-       if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
+       if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) {
                struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
                memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
-               ext4_clear_inode_state(inode, EXT4_STATE_NEW);
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW;
        }
 
        error = ext4_xattr_ibody_find(inode, &i, &is);
@@ -1047,7 +1047,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
                ext4_xattr_update_super_block(handle, inode->i_sb);
                inode->i_ctime = ext4_current_time(inode);
                if (!value)
-                       ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+                       EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
                error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
                /*
                 * The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1062,7 +1062,7 @@ cleanup:
        brelse(is.iloc.bh);
        brelse(bs.bh);
        if (no_expand == 0)
-               ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
        up_write(&EXT4_I(inode)->xattr_sem);
        return error;
 }
@@ -1327,8 +1327,6 @@ retry:
                        goto cleanup;
                kfree(b_entry_name);
                kfree(buffer);
-               b_entry_name = NULL;
-               buffer = NULL;
                brelse(is->iloc.bh);
                kfree(is);
                kfree(bs);
index 72646e2c0f48c585b96550f084e2ddf6eed06f07..f565f24019b585c0d52d6934f4e3c32f22b6f420 100644 (file)
@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
 {
        struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
        wchar_t *ip, *ext_start, *end, *name_start;
-       unsigned char base[9], ext[4], buf[5], *p;
+       unsigned char base[9], ext[4], buf[8], *p;
        unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
        int chl, chi;
        int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
                        return 0;
        }
 
-       i = jiffies;
+       i = jiffies & 0xffff;
        sz = (jiffies >> 16) & 0x7;
        if (baselen > 2) {
                baselen = numtail2_baselen;
@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
        name_res[baselen + 4] = '~';
        name_res[baselen + 5] = '1' + sz;
        while (1) {
-               snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
+               sprintf(buf, "%04X", i);
                memcpy(&name_res[baselen], buf, 4);
                if (vfat_find_form(dir, name_res) < 0)
                        break;
index 666c7ce1fc4150af27d5d0b6f7b4cc9196c5c395..8eb44042e00934dbe1fe13c2af1fd659ddd8c296 100644 (file)
@@ -420,9 +420,7 @@ retry:
                        continue;
                if (!(f->f_mode & FMODE_WRITE))
                        continue;
-               spin_lock(&f->f_lock);
                f->f_mode &= ~FMODE_WRITE;
-               spin_unlock(&f->f_lock);
                if (file_check_writeable(f) != 0)
                        continue;
                file_release_write(f);
index 60a53615ed027ff9fbab55e32cf52f58e1e466ce..b1ff16fec7f2c4630e8478afe5147b0105b71114 100644 (file)
@@ -859,12 +859,6 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
        unsigned long expired;
        long nr_pages;
 
-       /*
-        * When set to zero, disable periodic writeback
-        */
-       if (!dirty_writeback_interval)
-               return 0;
-
        expired = wb->last_old_flush +
                        msecs_to_jiffies(dirty_writeback_interval * 10);
        if (time_before(jiffies, expired))
@@ -960,12 +954,8 @@ int bdi_writeback_task(struct bdi_writeback *wb)
                                break;
                }
 
-               if (dirty_writeback_interval) {
-                       wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
-                       schedule_timeout_interruptible(wait_jiffies);
-               } else
-                       schedule();
-
+               wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
+               schedule_timeout_interruptible(wait_jiffies);
                try_to_freeze();
        }
 
@@ -1222,23 +1212,6 @@ void writeback_inodes_sb(struct super_block *sb)
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
-/**
- * writeback_inodes_sb_if_idle -       start writeback if none underway
- * @sb: the superblock
- *
- * Invoke writeback_inodes_sb if no writeback is currently underway.
- * Returns 1 if writeback was started, 0 if not.
- */
-int writeback_inodes_sb_if_idle(struct super_block *sb)
-{
-       if (!writeback_in_progress(sb->s_bdi)) {
-               writeback_inodes_sb(sb);
-               return 1;
-       } else
-               return 0;
-}
-EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
-
 /**
  * sync_inodes_sb      -       sync sb inode pages
  * @sb: the superblock
index 650546f8612d44d1200e2530ec704f52ac3b592b..51d9e33d634f4fb40f28fc00276123e1dc8f13c0 100644 (file)
@@ -1158,14 +1158,6 @@ __acquires(&fc->lock)
        }
 }
 
-static void end_queued_requests(struct fuse_conn *fc)
-{
-       fc->max_background = UINT_MAX;
-       flush_bg_queue(fc);
-       end_requests(fc, &fc->pending);
-       end_requests(fc, &fc->processing);
-}
-
 /*
  * Abort all requests.
  *
@@ -1192,7 +1184,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
                fc->connected = 0;
                fc->blocked = 0;
                end_io_requests(fc);
-               end_queued_requests(fc);
+               end_requests(fc, &fc->pending);
+               end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1207,9 +1200,8 @@ int fuse_dev_release(struct inode *inode, struct file *file)
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
-               fc->blocked = 0;
-               end_queued_requests(fc);
-               wake_up_all(&fc->blocked_waitq);
+               end_requests(fc, &fc->pending);
+               end_requests(fc, &fc->processing);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }
index cbd2214edc15b2c4cd65e40f7de6538d1169f982..a9f5e137f1d31547f86bbfdb87df11c07daf963f 100644 (file)
@@ -134,7 +134,6 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
 void fuse_finish_open(struct inode *inode, struct file *file)
 {
        struct fuse_file *ff = file->private_data;
-       struct fuse_conn *fc = get_fuse_conn(inode);
 
        if (ff->open_flags & FOPEN_DIRECT_IO)
                file->f_op = &fuse_direct_io_file_operations;
@@ -142,15 +141,6 @@ void fuse_finish_open(struct inode *inode, struct file *file)
                invalidate_inode_pages2(inode->i_mapping);
        if (ff->open_flags & FOPEN_NONSEEKABLE)
                nonseekable_open(inode, file);
-       if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
-               struct fuse_inode *fi = get_fuse_inode(inode);
-
-               spin_lock(&fc->lock);
-               fi->attr_version = ++fc->attr_version;
-               i_size_write(inode, 0);
-               spin_unlock(&fc->lock);
-               fuse_invalidate_attr(inode);
-       }
 }
 
 int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
index 2168da1216475b2b9d3a9611730b77c89a3022d4..3fc4e3ac7d84efdc76b4ddbae91f82f90f8c257d 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/spinlock.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
-#include <linux/xattr.h>
 #include <linux/posix_acl.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/gfs2_ondisk.h>
 #include "trans.h"
 #include "util.h"
 
+#define ACL_ACCESS 1
+#define ACL_DEFAULT 0
+
+int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
+                         struct gfs2_ea_request *er, int *remove, mode_t *mode)
+{
+       struct posix_acl *acl;
+       int error;
+
+       error = gfs2_acl_validate_remove(ip, access);
+       if (error)
+               return error;
+
+       if (!er->er_data)
+               return -EINVAL;
+
+       acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
+       if (IS_ERR(acl))
+               return PTR_ERR(acl);
+       if (!acl) {
+               *remove = 1;
+               return 0;
+       }
+
+       error = posix_acl_valid(acl);
+       if (error)
+               goto out;
+
+       if (access) {
+               error = posix_acl_equiv_mode(acl, mode);
+               if (!error)
+                       *remove = 1;
+               else if (error > 0)
+                       error = 0;
+       }
+
+out:
+       posix_acl_release(acl);
+       return error;
+}
+
+int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
+{
+       if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
+               return -EOPNOTSUPP;
+       if (!is_owner_or_cap(&ip->i_inode))
+               return -EPERM;
+       if (S_ISLNK(ip->i_inode.i_mode))
+               return -EOPNOTSUPP;
+       if (!access && !S_ISDIR(ip->i_inode.i_mode))
+               return -EACCES;
+
+       return 0;
+}
+
 static int acl_get(struct gfs2_inode *ip, const char *name,
                   struct posix_acl **acl, struct gfs2_ea_location *el,
                   char **datap, unsigned int *lenp)
@@ -223,117 +277,3 @@ out_brelse:
        return error;
 }
 
-static int gfs2_acl_type(const char *name)
-{
-       if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
-               return ACL_TYPE_ACCESS;
-       if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
-               return ACL_TYPE_DEFAULT;
-       return -EINVAL;
-}
-
-static int gfs2_xattr_system_get(struct inode *inode, const char *name,
-                                void *buffer, size_t size)
-{
-       int type;
-
-       type = gfs2_acl_type(name);
-       if (type < 0)
-               return type;
-
-       return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
-}
-
-static int gfs2_set_mode(struct inode *inode, mode_t mode)
-{
-       int error = 0;
-
-       if (mode != inode->i_mode) {
-               struct iattr iattr;
-
-               iattr.ia_valid = ATTR_MODE;
-               iattr.ia_mode = mode;
-
-               error = gfs2_setattr_simple(GFS2_I(inode), &iattr);
-       }
-
-       return error;
-}
-
-static int gfs2_xattr_system_set(struct inode *inode, const char *name,
-                                const void *value, size_t size, int flags)
-{
-       struct gfs2_sbd *sdp = GFS2_SB(inode);
-       struct posix_acl *acl = NULL;
-       int error = 0, type;
-
-       if (!sdp->sd_args.ar_posix_acl)
-               return -EOPNOTSUPP;
-
-       type = gfs2_acl_type(name);
-       if (type < 0)
-               return type;
-       if (flags & XATTR_CREATE)
-               return -EINVAL;
-       if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
-               return value ? -EACCES : 0;
-       if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
-               return -EPERM;
-       if (S_ISLNK(inode->i_mode))
-               return -EOPNOTSUPP;
-
-       if (!value)
-               goto set_acl;
-
-       acl = posix_acl_from_xattr(value, size);
-       if (!acl) {
-               /*
-                * acl_set_file(3) may request that we set default ACLs with
-                * zero length -- defend (gracefully) against that here.
-                */
-               goto out;
-       }
-       if (IS_ERR(acl)) {
-               error = PTR_ERR(acl);
-               goto out;
-       }
-
-       error = posix_acl_valid(acl);
-       if (error)
-               goto out_release;
-
-       error = -EINVAL;
-       if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
-               goto out_release;
-
-       if (type == ACL_TYPE_ACCESS) {
-               mode_t mode = inode->i_mode;
-               error = posix_acl_equiv_mode(acl, &mode);
-
-               if (error <= 0) {
-                       posix_acl_release(acl);
-                       acl = NULL;
-
-                       if (error < 0)
-                               return error;
-               }
-
-               error = gfs2_set_mode(inode, mode);
-               if (error)
-                       goto out_release;
-       }
-
-set_acl:
-       error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
-out_release:
-       posix_acl_release(acl);
-out:
-       return error;
-}
-
-struct xattr_handler gfs2_xattr_system_handler = {
-       .prefix = XATTR_SYSTEM_PREFIX,
-       .get    = gfs2_xattr_system_get,
-       .set    = gfs2_xattr_system_set,
-};
-
index cc954390b6daf3e633dd13d9bbb80982baf685a2..6751930bfb648e809eb90e028c0caa8458e1e5e8 100644 (file)
 #include "incore.h"
 
 #define GFS2_POSIX_ACL_ACCESS          "posix_acl_access"
+#define GFS2_POSIX_ACL_ACCESS_LEN      16
 #define GFS2_POSIX_ACL_DEFAULT         "posix_acl_default"
-#define GFS2_ACL_MAX_ENTRIES           25
+#define GFS2_POSIX_ACL_DEFAULT_LEN     17
 
-extern int gfs2_check_acl(struct inode *inode, int mask);
-extern int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
-extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
-extern struct xattr_handler gfs2_xattr_system_handler;
+#define GFS2_ACL_IS_ACCESS(name, len) \
+         ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \
+         !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len)))
+
+#define GFS2_ACL_IS_DEFAULT(name, len) \
+         ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
+         !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))
+
+struct gfs2_ea_request;
+
+int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
+                         struct gfs2_ea_request *er,
+                         int *remove, mode_t *mode);
+int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
+int gfs2_check_acl(struct inode *inode, int mask);
+int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
+int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
 
 #endif /* __ACL_DOT_H__ */
index 0bb312961c92fb01d661ac273b0c0e4f32d140dc..297d7e5cebad8d283a9360ad86cf3a96a2028f02 100644 (file)
@@ -392,7 +392,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
        unsigned totlen = be16_to_cpu(dent->de_rec_len);
 
        if (gfs2_dirent_sentinel(dent))
-               actual = 0;
+               actual = GFS2_DIRENT_SIZE(0);
        if (totlen - actual >= required)
                return 1;
        return 0;
index b3fd1d8869464c22658dee350245e4d18d06a15f..4eb308aa32342f0f51eb00cafefda3ecbb6ada17 100644 (file)
@@ -218,11 +218,6 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
        if (error)
                goto out_drop_write;
 
-       error = -EACCES;
-       if (!is_owner_or_cap(inode))
-               goto out;
-
-       error = 0;
        flags = ip->i_diskflags;
        new_flags = (flags & ~mask) | (reqflags & mask);
        if ((new_flags ^ flags) == 0)
@@ -280,10 +275,8 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
 {
        struct inode *inode = filp->f_path.dentry->d_inode;
        u32 fsflags, gfsflags;
-
        if (get_user(fsflags, ptr))
                return -EFAULT;
-
        gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
        if (!S_ISDIR(inode->i_mode)) {
                if (gfsflags & GFS2_DIF_INHERIT_JDATA)
@@ -613,7 +606,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
+       if (__mandatory_lock(&ip->i_inode))
                return -ENOLCK;
 
        if (cmd == F_CANCELLK) {
index 6b803540951ef9c37c905bf1197d51c0f065425e..8a0f8ef6ee2700cc5ef62f5090b11d602b9f8b20 100644 (file)
@@ -1507,6 +1507,18 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name,
        return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
 }
 
+static int gfs2_xattr_system_get(struct inode *inode, const char *name,
+                                void *buffer, size_t size)
+{
+       return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
+}
+
+static int gfs2_xattr_system_set(struct inode *inode, const char *name,
+                                const void *value, size_t size, int flags)
+{
+       return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
+}
+
 static int gfs2_xattr_security_get(struct inode *inode, const char *name,
                                   void *buffer, size_t size)
 {
@@ -1531,6 +1543,12 @@ static struct xattr_handler gfs2_xattr_security_handler = {
        .set    = gfs2_xattr_security_set,
 };
 
+static struct xattr_handler gfs2_xattr_system_handler = {
+       .prefix = XATTR_SYSTEM_PREFIX,
+       .get    = gfs2_xattr_system_get,
+       .set    = gfs2_xattr_system_set,
+};
+
 struct xattr_handler *gfs2_xattr_handlers[] = {
        &gfs2_xattr_user_handler,
        &gfs2_xattr_security_handler,
index bd224eec9b072d5ebe0a075c57e203b9fde61c1d..4160afad6d00fc4b8812ac497d2a3bcac28efc60 100644 (file)
@@ -1913,7 +1913,7 @@ static void __init jbd_create_debugfs_entry(void)
 {
        jbd_debugfs_dir = debugfs_create_dir("jbd", NULL);
        if (jbd_debugfs_dir)
-               jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR,
+               jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO,
                                               jbd_debugfs_dir,
                                               &journal_enable_debug);
 }
index 886849370950f462627f0a9d2df692ec4d117d97..ca0f5eb62b20e450b38ef84b52c64bc2a214bc17 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/jbd2.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/blkdev.h>
 #include <trace/events/jbd2.h>
 
 /*
@@ -516,20 +515,6 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
        journal->j_tail_sequence = first_tid;
        journal->j_tail = blocknr;
        spin_unlock(&journal->j_state_lock);
-
-       /*
-        * If there is an external journal, we need to make sure that
-        * any data blocks that were recently written out --- perhaps
-        * by jbd2_log_do_checkpoint() --- are flushed out before we
-        * drop the transactions from the external journal.  It's
-        * unlikely this will be necessary, especially with a
-        * appropriately sized journal, but we need this to guarantee
-        * correctness.  Fortunately jbd2_cleanup_journal_tail()
-        * doesn't get called all that often.
-        */
-       if ((journal->j_fs_dev != journal->j_dev) &&
-           (journal->j_flags & JBD2_BARRIER))
-               blkdev_issue_flush(journal->j_fs_dev, NULL);
        if (!(journal->j_flags & JBD2_ABORT))
                jbd2_journal_update_superblock(journal, 1);
        return 0;
index 09ab6ac6a075ab8a5d491b26db81f283e32b99f4..8896c1d4febe590e1e6941d76f62833a04813201 100644 (file)
@@ -259,7 +259,6 @@ static int journal_submit_data_buffers(journal_t *journal,
                        ret = err;
                spin_lock(&journal->j_list_lock);
                J_ASSERT(jinode->i_transaction == commit_transaction);
-               commit_transaction->t_flushed_data_blocks = 1;
                jinode->i_flags &= ~JI_COMMIT_RUNNING;
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
@@ -709,17 +708,8 @@ start_journal_io:
                }
        }
 
-       /*
-        * If the journal is not located on the file system device,
-        * then we must flush the file system device before we issue
-        * the commit record
-        */
-       if (commit_transaction->t_flushed_data_blocks &&
-           (journal->j_fs_dev != journal->j_dev) &&
-           (journal->j_flags & JBD2_BARRIER))
-               blkdev_issue_flush(journal->j_fs_dev, NULL);
-
        /* Done it all: now write the commit record asynchronously. */
+
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                err = journal_submit_commit_record(journal, commit_transaction,
@@ -730,6 +720,13 @@ start_journal_io:
                        blkdev_issue_flush(journal->j_dev, NULL);
        }
 
+       /*
+        * This is the right place to wait for data buffers both for ASYNC
+        * and !ASYNC commit. If commit is ASYNC, we need to wait only after
+        * the commit block went to disk (which happens above). If commit is
+        * SYNC, we need to wait for data buffers before we start writing
+        * commit block, which happens below in such setting.
+        */
        err = journal_finish_inode_data_buffers(journal, commit_transaction);
        if (err) {
                printk(KERN_WARNING
index 17af879e6e9ec157528f3bcab4ad7c01436abc68..b7ca3a92a4dba0b23f555a75cac79fea1b6ae9e4 100644 (file)
@@ -2115,8 +2115,7 @@ static void __init jbd2_create_debugfs_entry(void)
 {
        jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL);
        if (jbd2_debugfs_dir)
-               jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME,
-                                              S_IRUGO | S_IWUSR,
+               jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO,
                                               jbd2_debugfs_dir,
                                               &jbd2_journal_enable_debug);
 }
index 1aba0039f1c995ab0909664cc325a917ee5d8c0b..7f24a0bb08ca0a25d31352e9611273de87d2ee1b 100644 (file)
@@ -81,7 +81,6 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
        struct inode *iplist[1];
        struct jfs_superblock *j_sb, *j_sb2;
        uint old_agsize;
-       int agsizechanged = 0;
        struct buffer_head *bh, *bh2;
 
        /* If the volume hasn't grown, get out now */
@@ -334,9 +333,6 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
         */
        if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
                goto error_out;
-
-       agsizechanged |= (bmp->db_agsize != old_agsize);
-
        /*
         * the map now has extended to cover additional nblocks:
         * dn_mapsize = oldMapsize + nblocks;
@@ -436,7 +432,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
         * will correctly identify the new ag);
         */
        /* if new AG size the same as old AG size, done! */
-       if (agsizechanged) {
+       if (bmp->db_agsize != old_agsize) {
                if ((rc = diExtendFS(ipimap, ipbmap)))
                        goto error_out;
 
index 8b0da9b108021a280d895ec5b26c860dd9a52351..fad364548bc9e3716b08dc0f6ca66a58d1f9b56c 100644 (file)
@@ -85,25 +85,46 @@ struct ea_buffer {
 #define EA_MALLOC      0x0008
 
 
-static int is_known_namespace(const char *name)
-{
-       if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
-           strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
-           strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
-           strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
-               return false;
-
-       return true;
-}
-
 /*
  * These three routines are used to recognize on-disk extended attributes
  * that are in a recognized namespace.  If the attribute is not recognized,
  * "os2." is prepended to the name
  */
-static int is_os2_xattr(struct jfs_ea *ea)
+static inline int is_os2_xattr(struct jfs_ea *ea)
 {
-       return !is_known_namespace(ea->name);
+       /*
+        * Check for "system."
+        */
+       if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) &&
+           !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+               return false;
+       /*
+        * Check for "user."
+        */
+       if ((ea->namelen >= XATTR_USER_PREFIX_LEN) &&
+           !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+               return false;
+       /*
+        * Check for "security."
+        */
+       if ((ea->namelen >= XATTR_SECURITY_PREFIX_LEN) &&
+           !strncmp(ea->name, XATTR_SECURITY_PREFIX,
+                    XATTR_SECURITY_PREFIX_LEN))
+               return false;
+       /*
+        * Check for "trusted."
+        */
+       if ((ea->namelen >= XATTR_TRUSTED_PREFIX_LEN) &&
+           !strncmp(ea->name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+               return false;
+       /*
+        * Add any other valid namespace prefixes here
+        */
+
+       /*
+        * We assume it's OS/2's flat namespace
+        */
+       return true;
 }
 
 static inline int name_size(struct jfs_ea *ea)
@@ -741,23 +762,13 @@ static int can_set_xattr(struct inode *inode, const char *name,
        if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
                return can_set_system_xattr(inode, name, value, value_len);
 
-       if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) {
-               /*
-                * This makes sure that we aren't trying to set an
-                * attribute in a different namespace by prefixing it
-                * with "os2."
-                */
-               if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN))
-                               return -EOPNOTSUPP;
-               return 0;
-       }
-
        /*
         * Don't allow setting an attribute in an unknown namespace.
         */
        if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
            strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
-           strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+           strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+           strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))
                return -EOPNOTSUPP;
 
        return 0;
@@ -939,8 +950,19 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
        int xattr_size;
        ssize_t size;
        int namelen = strlen(name);
+       char *os2name = NULL;
        char *value;
 
+       if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
+               os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
+                                 GFP_KERNEL);
+               if (!os2name)
+                       return -ENOMEM;
+               strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
+               name = os2name;
+               namelen -= XATTR_OS2_PREFIX_LEN;
+       }
+
        down_read(&JFS_IP(inode)->xattr_sem);
 
        xattr_size = ea_get(inode, &ea_buf, 0);
@@ -978,6 +1000,8 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
       out:
        up_read(&JFS_IP(inode)->xattr_sem);
 
+       kfree(os2name);
+
        return size;
 }
 
@@ -986,19 +1010,6 @@ ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
 {
        int err;
 
-       if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
-               /*
-                * skip past "os2." prefix
-                */
-               name += XATTR_OS2_PREFIX_LEN;
-               /*
-                * Don't allow retrieving properly prefixed attributes
-                * by prepending them with "os2."
-                */
-               if (is_known_namespace(name))
-                       return -EOPNOTSUPP;
-       }
-
        err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);
 
        return err;
index ba36e93764c29233833a8fff22de5ad0bf4d4ead..219576c52d807e15b779d53be3a42dfa1baae9ae 100644 (file)
@@ -415,8 +415,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
  * unique inode values later for this filesystem, then you must take care
  * to pass it an appropriate max_reserved value to avoid collisions.
  */
-int simple_fill_super(struct super_block *s, unsigned long magic,
-                     struct tree_descr *files)
+int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
 {
        struct inode *inode;
        struct dentry *root;
index b0afbd427be7abe4f27a3833b5cde21ac5c7d538..a2b3c28a499db4f6cd82221a80120cce08a767ed 100644 (file)
@@ -828,17 +828,6 @@ fail:
        return PTR_ERR(dentry);
 }
 
-/*
- * This is a temporary kludge to deal with "automount" symlinks; proper
- * solution is to trigger them on follow_mount(), so that do_lookup()
- * would DTRT.  To be killed before 2.6.34-final.
- */
-static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
-{
-       return inode && unlikely(inode->i_op->follow_link) &&
-               ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
-}
-
 /*
  * Name resolution.
  * This is the basic name resolution function, turning a pathname into
@@ -975,7 +964,8 @@ last_component:
                if (err)
                        break;
                inode = next.dentry->d_inode;
-               if (follow_on_final(inode, lookup_flags)) {
+               if ((lookup_flags & LOOKUP_FOLLOW)
+                   && inode && inode->i_op->follow_link) {
                        err = do_follow_link(&next, nd);
                        if (err)
                                goto return_err;
index 2beb0fbe70ecc0863d6d17e7e58759c3ab766c84..bdc3cb4fd2220c6fde0f35159a902768a75d60f6 100644 (file)
@@ -1119,15 +1119,8 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
 {
        struct path path;
        int retval;
-       int lookup_flags = 0;
 
-       if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
-               return -EINVAL;
-
-       if (!(flags & UMOUNT_NOFOLLOW))
-               lookup_flags |= LOOKUP_FOLLOW;
-
-       retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
+       retval = user_path(name, &path);
        if (retval)
                goto out;
        retval = -EINVAL;
index 19cbbf763110a56aa14859b921492328ded25336..99ea196f071f04d81c82fbc42bdcb000b9f0694d 100644 (file)
@@ -273,7 +273,7 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
            sin1->sin6_scope_id != sin2->sin6_scope_id)
                return 0;
 
-       return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
+       return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr);
 }
 #else  /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
 static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
@@ -965,8 +965,6 @@ out_error:
 static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
 {
        target->flags = source->flags;
-       target->rsize = source->rsize;
-       target->wsize = source->wsize;
        target->acregmin = source->acregmin;
        target->acregmax = source->acregmax;
        target->acdirmin = source->acdirmin;
@@ -1285,8 +1283,7 @@ static int nfs4_init_server(struct nfs_server *server,
 
        /* Initialise the client representation from the mount data */
        server->flags = data->flags;
-       server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
-               NFS_CAP_POSIX_LOCK;
+       server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
        server->options = data->options;
 
        /* Get a client record */
index 7f237d243be50cc2bb850a82bcc8df77e4017d6d..09f383795174d065f36a413a6f698b467a93c57c 100644 (file)
@@ -68,10 +68,4 @@ static inline int nfs_inode_return_delegation(struct inode *inode)
 }
 #endif
 
-static inline int nfs_have_delegated_attributes(struct inode *inode)
-{
-       return nfs_have_delegation(inode, FMODE_READ) &&
-               !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED);
-}
-
 #endif
index a87cbd8bd0b9527f8e2b971ff3fd60c80c10c221..7cb298525eefd1a5ebcac1870ce08679447a3f2e 100644 (file)
@@ -837,8 +837,6 @@ out_zap_parent:
                /* If we have submounts, don't unhash ! */
                if (have_submounts(dentry))
                        goto out_valid;
-               if (dentry->d_flags & DCACHE_DISCONNECTED)
-                       goto out_valid;
                shrink_dcache_parent(dentry);
        }
        d_drop(dentry);
@@ -1027,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
                                res = NULL;
                                goto out;
                        /* This turned out not to be a regular file */
-                       case -EISDIR:
                        case -ENOTDIR:
                                goto no_open;
                        case -ELOOP:
                                if (!(nd->intent.open.flags & O_NOFOLLOW))
                                        goto no_open;
+                       /* case -EISDIR: */
                        /* case -EINVAL: */
                        default:
                                goto out;
@@ -1799,7 +1797,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
        cache = nfs_access_search_rbtree(inode, cred);
        if (cache == NULL)
                goto out;
-       if (!nfs_have_delegated_attributes(inode) &&
+       if (!nfs_have_delegation(inode, FMODE_READ) &&
            !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
                goto out_stale;
        res->jiffies = cache->jiffies;
index c1fd68bf337d63ffa85dd16435a26527704e5c68..f4d54ba97cc62653ffceedf568f4d4c987840682 100644 (file)
@@ -36,19 +36,6 @@ struct nfs_dns_ent {
 };
 
 
-static void nfs_dns_ent_update(struct cache_head *cnew,
-               struct cache_head *ckey)
-{
-       struct nfs_dns_ent *new;
-       struct nfs_dns_ent *key;
-
-       new = container_of(cnew, struct nfs_dns_ent, h);
-       key = container_of(ckey, struct nfs_dns_ent, h);
-
-       memcpy(&new->addr, &key->addr, key->addrlen);
-       new->addrlen = key->addrlen;
-}
-
 static void nfs_dns_ent_init(struct cache_head *cnew,
                struct cache_head *ckey)
 {
@@ -62,7 +49,8 @@ static void nfs_dns_ent_init(struct cache_head *cnew,
        new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
        if (new->hostname) {
                new->namelen = key->namelen;
-               nfs_dns_ent_update(cnew, ckey);
+               memcpy(&new->addr, &key->addr, key->addrlen);
+               new->addrlen = key->addrlen;
        } else {
                new->namelen = 0;
                new->addrlen = 0;
@@ -246,7 +234,7 @@ static struct cache_detail nfs_dns_resolve = {
        .cache_show = nfs_dns_show,
        .match = nfs_dns_match,
        .init = nfs_dns_ent_init,
-       .update = nfs_dns_ent_update,
+       .update = nfs_dns_ent_init,
        .alloc = nfs_dns_ent_alloc,
 };
 
index 9f83d9fe9a61af75574782c310d27ee78f03017a..393d40fd7eb9d8fd5dd5d784fc94b741d94e3912 100644 (file)
@@ -27,8 +27,6 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/aio.h>
-#include <linux/gfp.h>
-#include <linux/swap.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -486,19 +484,10 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
  */
 static int nfs_release_page(struct page *page, gfp_t gfp)
 {
-       struct address_space *mapping = page->mapping;
-
        dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
 
-       /* Only do I/O if gfp is a superset of GFP_KERNEL */
-       if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
-               int how = FLUSH_SYNC;
-
-               /* Don't let kswapd deadlock waiting for OOM RPC calls */
-               if (current_is_kswapd())
-                       how = 0;
-               nfs_commit_inode(mapping->host, how);
-       }
+       if (gfp & __GFP_WAIT)
+               nfs_wb_page(page->mapping->host, page);
        /* If PagePrivate() is set, then the page is not freeable */
        if (PagePrivate(page))
                return 0;
index 3c80474a265183ba30cced3dd405f3b641b9ec4b..faa091865ad05c956114ab7c7642994845717382 100644 (file)
@@ -759,7 +759,7 @@ int nfs_attribute_timeout(struct inode *inode)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
 
-       if (nfs_have_delegated_attributes(inode))
+       if (nfs_have_delegation(inode, FMODE_READ))
                return 0;
        return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
 }
index 3c7581bcdb0d3a10cd98cd25bae42daa3b7225a3..6c200595099fa3ba3c52ce290ce05851104f6b01 100644 (file)
@@ -1439,8 +1439,6 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
                nfs_post_op_update_inode(dir, o_res->dir_attr);
        } else
                nfs_refresh_inode(dir, o_res->dir_attr);
-       if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
-               server->caps &= ~NFS_CAP_POSIX_LOCK;
        if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
                status = _nfs4_proc_open_confirm(data);
                if (status != 0)
@@ -1575,7 +1573,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
        status = PTR_ERR(state);
        if (IS_ERR(state))
                goto err_opendata_put;
-       if (server->caps & NFS_CAP_POSIX_LOCK)
+       if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
                set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
        nfs4_opendata_put(opendata);
        nfs4_put_state_owner(sp);
index e81b2bf67c42ee980371dcd3b73916bbdef82312..a4cd1b79b71dd10c64fb12d9174be7ddd49ad2a0 100644 (file)
@@ -840,8 +840,8 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
                bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
                *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
                *p++ = cpu_to_be32(0);
-               *p++ = cpu_to_be32(iap->ia_atime.tv_sec);
-               *p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
+               *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
+               *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
        }
        else if (iap->ia_valid & ATTR_ATIME) {
                bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
@@ -2096,7 +2096,7 @@ nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p,
        encode_compound_hdr(&xdr, req, &hdr);
        encode_sequence(&xdr, &args->seq_args, &hdr);
        encode_putfh(&xdr, args->fh, &hdr);
-       replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
+       replen = hdr.replen + nfs4_fattr_bitmap_maxsz + 1;
        encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
 
        xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
index 29d9d36cd5f431e603fcae5daccebbd5d62d7fa3..a12c45b65dd42bc1712f23c7050923a6d83eed28 100644 (file)
@@ -112,10 +112,12 @@ void nfs_unlock_request(struct nfs_page *req)
  */
 int nfs_set_page_tag_locked(struct nfs_page *req)
 {
+       struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
+
        if (!nfs_lock_request_dontget(req))
                return 0;
        if (req->wb_page != NULL)
-               radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
+               radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
        return 1;
 }
 
@@ -124,10 +126,10 @@ int nfs_set_page_tag_locked(struct nfs_page *req)
  */
 void nfs_clear_page_tag_locked(struct nfs_page *req)
 {
-       if (req->wb_page != NULL) {
-               struct inode *inode = req->wb_context->path.dentry->d_inode;
-               struct nfs_inode *nfsi = NFS_I(inode);
+       struct inode *inode = req->wb_context->path.dentry->d_inode;
+       struct nfs_inode *nfsi = NFS_I(inode);
 
+       if (req->wb_page != NULL) {
                spin_lock(&inode->i_lock);
                radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
                nfs_unlock_request(req);
@@ -140,22 +142,16 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
  * nfs_clear_request - Free up all resources allocated to the request
  * @req:
  *
- * Release page and open context resources associated with a read/write
- * request after it has completed.
+ * Release page resources associated with a write request after it
+ * has completed.
  */
 void nfs_clear_request(struct nfs_page *req)
 {
        struct page *page = req->wb_page;
-       struct nfs_open_context *ctx = req->wb_context;
-
        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
-       if (ctx != NULL) {
-               put_nfs_open_context(ctx);
-               req->wb_context = NULL;
-       }
 }
 
 
@@ -169,8 +165,9 @@ static void nfs_free_request(struct kref *kref)
 {
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
 
-       /* Release struct file and open context */
+       /* Release struct file or cached credential */
        nfs_clear_request(req);
+       put_nfs_open_context(req->wb_context);
        nfs_page_free(req);
 }
 
index c0173a8531e212d8a473221c48f4ec363252c6d9..4bf23f6f93cf7b72833e4c2a6872e46ba6c15da6 100644 (file)
@@ -534,22 +534,6 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
        }
 }
 
-#ifdef CONFIG_NFS_V4
-static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss,
-                                   int showdefaults)
-{
-       struct nfs_client *clp = nfss->nfs_client;
-
-       seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
-       seq_printf(m, ",minorversion=%u", clp->cl_minorversion);
-}
-#else
-static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss,
-                                   int showdefaults)
-{
-}
-#endif
-
 /*
  * Describe the mount options in force on this server representation
  */
@@ -611,18 +595,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
 
        if (version != 4)
                nfs_show_mountd_options(m, nfss, showdefaults);
-       else
-               nfs_show_nfsv4_options(m, nfss, showdefaults);
 
+#ifdef CONFIG_NFS_V4
+       if (clp->rpc_ops->version == 4)
+               seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
+#endif
        if (nfss->options & NFS_OPTION_FSCACHE)
                seq_printf(m, ",fsc");
-
-       if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
-               if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
-                       seq_printf(m, ",lookupcache=none");
-               else
-                       seq_printf(m, ",lookupcache=pos");
-       }
 }
 
 /*
index 6ad6282e3076f4d16b6ff134253ac8a59b456063..2153f9bdbebdcb3cb094a76c73b9948815b30e2d 100644 (file)
@@ -2002,9 +2002,7 @@ nfs4_file_downgrade(struct file *filp, unsigned int share_access)
 {
        if (share_access & NFS4_SHARE_ACCESS_WRITE) {
                drop_file_write_access(filp);
-               spin_lock(&filp->f_lock);
                filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE;
-               spin_unlock(&filp->f_lock);
        }
 }
 
index 12f62ff353d68124403cdf2613fdf89a6d477b34..0fbd50cee1f60e8c437102fb4b2cb2cee53afbe1 100644 (file)
@@ -168,10 +168,10 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
        argp->p = page_address(argp->pagelist[0]);
        argp->pagelist++;
        if (argp->pagelen < PAGE_SIZE) {
-               argp->end = argp->p + (argp->pagelen>>2);
+               argp->end = p + (argp->pagelen>>2);
                argp->pagelen = 0;
        } else {
-               argp->end = argp->p + (PAGE_SIZE>>2);
+               argp->end = p + (PAGE_SIZE>>2);
                argp->pagelen -= PAGE_SIZE;
        }
        memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
@@ -1433,10 +1433,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
                        argp->p = page_address(argp->pagelist[0]);
                        argp->pagelist++;
                        if (argp->pagelen < PAGE_SIZE) {
-                               argp->end = argp->p + (argp->pagelen>>2);
+                               argp->end = p + (argp->pagelen>>2);
                                argp->pagelen = 0;
                        } else {
-                               argp->end = argp->p + (PAGE_SIZE>>2);
+                               argp->end = p + (PAGE_SIZE>>2);
                                argp->pagelen -= PAGE_SIZE;
                        }
                }
@@ -2129,15 +2129,9 @@ out_acl:
                 * and this is the root of a cross-mounted filesystem.
                 */
                if (ignore_crossmnt == 0 &&
-                   dentry == exp->ex_path.mnt->mnt_root) {
-                       struct path path = exp->ex_path;
-                       path_get(&path);
-                       while (follow_up(&path)) {
-                               if (path.dentry != path.mnt->mnt_root)
-                                       break;
-                       }
-                       err = vfs_getattr(path.mnt, path.dentry, &stat);
-                       path_put(&path);
+                   exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
+                       err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
+                               exp->ex_path.mnt->mnt_mountpoint, &stat);
                        if (err)
                                goto out_nfserr;
                }
index 4d4e2d00d15ee88573fe1a6c289bf28836eb909e..67ea83eedd43eb7b69e896e1db54f8fd995a0d9e 100644 (file)
@@ -136,7 +136,7 @@ u32 nfsd_supported_minorversion;
 int nfsd_vers(int vers, enum vers_op change)
 {
        if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
-               return 0;
+               return -1;
        switch(change) {
        case NFSD_SET:
                nfsd_versions[vers] = nfsd_version[vers];
index 63e7b108ff8353c7e9dc7817aea5f89fe78d9560..644e66727dd0e6f5464eb092b9d6db6caeac837f 100644 (file)
@@ -781,7 +781,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
        sb->s_export_op = &nilfs_export_ops;
        sb->s_root = NULL;
        sb->s_time_gran = 1;
-       sb->s_bdi = nilfs->ns_bdi;
 
        if (!nilfs_loaded(nilfs)) {
                err = load_nilfs(nilfs, sbi);
index 5d3d2a782abcf00438f82e6c1cff75c44cc059ca..1afb0a10229f448b55be812d2d1939817250cf77 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/path.h> /* struct path */
 #include <linux/slab.h> /* kmem_* */
 #include <linux/types.h>
-#include <linux/sched.h>
 
 #include "inotify.h"
 
@@ -72,9 +71,6 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
                        ret = 0;
        }
 
-       if (entry->mask & IN_ONESHOT)
-               fsnotify_destroy_mark_by_entry(entry);
-
        /*
         * If we hold the entry until after the event is on the queue
         * IN_IGNORED won't be able to pass this event in the queue
@@ -150,7 +146,6 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
        idr_for_each(&group->inotify_data.idr, idr_callback, group);
        idr_remove_all(&group->inotify_data.idr);
        idr_destroy(&group->inotify_data.idr);
-       free_uid(group->inotify_data.user);
 }
 
 void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
index aef8f5d7f94f3a08643b6e9aac54930a3649a5bb..ca44337b06cecdf00a08bcdc19e15e61d82c15cf 100644 (file)
@@ -106,11 +106,8 @@ static inline __u32 inotify_arg_to_mask(u32 arg)
 {
        __u32 mask;
 
-       /*
-        * everything should accept their own ignored, cares about children,
-        * and should receive events when the inode is unmounted
-        */
-       mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
+       /* everything should accept their own ignored and cares about children */
+       mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);
 
        /* mask off the flags used to open the fd */
        mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));
@@ -559,24 +556,21 @@ retry:
        if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
                goto out_err;
 
-       /* we are putting the mark on the idr, take a reference */
-       fsnotify_get_mark(&tmp_ientry->fsn_entry);
-
        spin_lock(&group->inotify_data.idr_lock);
        ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
                                group->inotify_data.last_wd+1,
                                &tmp_ientry->wd);
        spin_unlock(&group->inotify_data.idr_lock);
        if (ret) {
-               /* we didn't get on the idr, drop the idr reference */
-               fsnotify_put_mark(&tmp_ientry->fsn_entry);
-
                /* idr was out of memory allocate and try again */
                if (ret == -EAGAIN)
                        goto retry;
                goto out_err;
        }
 
+       /* we put the mark on the idr, take a reference */
+       fsnotify_get_mark(&tmp_ientry->fsn_entry);
+
        /* we are on the idr, now get on the inode */
        ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
        if (ret) {
index c8288df5712010c8485d352b8881de68813b0c4f..fbeaec762103a91eacd396f47a999244303954ee 100644 (file)
@@ -30,8 +30,6 @@
 #include "alloc.h"
 #include "dlmglue.h"
 #include "file.h"
-#include "inode.h"
-#include "journal.h"
 #include "ocfs2_fs.h"
 
 #include "xattr.h"
@@ -171,60 +169,6 @@ static struct posix_acl *ocfs2_get_acl(struct inode *inode, int type)
        return acl;
 }
 
-/*
- * Helper function to set i_mode in memory and disk. Some call paths
- * will not have di_bh or a journal handle to pass, in which case it
- * will create it's own.
- */
-static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
-                             handle_t *handle, umode_t new_mode)
-{
-       int ret, commit_handle = 0;
-       struct ocfs2_dinode *di;
-
-       if (di_bh == NULL) {
-               ret = ocfs2_read_inode_block(inode, &di_bh);
-               if (ret) {
-                       mlog_errno(ret);
-                       goto out;
-               }
-       } else
-               get_bh(di_bh);
-
-       if (handle == NULL) {
-               handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
-                                          OCFS2_INODE_UPDATE_CREDITS);
-               if (IS_ERR(handle)) {
-                       ret = PTR_ERR(handle);
-                       mlog_errno(ret);
-                       goto out_brelse;
-               }
-
-               commit_handle = 1;
-       }
-
-       di = (struct ocfs2_dinode *)di_bh->b_data;
-       ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
-                                     OCFS2_JOURNAL_ACCESS_WRITE);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
-       }
-
-       inode->i_mode = new_mode;
-       di->i_mode = cpu_to_le16(inode->i_mode);
-
-       ocfs2_journal_dirty(handle, di_bh);
-
-out_commit:
-       if (commit_handle)
-               ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
-out_brelse:
-       brelse(di_bh);
-out:
-       return ret;
-}
-
 /*
  * Set the access or default ACL of an inode.
  */
@@ -253,14 +197,9 @@ static int ocfs2_set_acl(handle_t *handle,
                        if (ret < 0)
                                return ret;
                        else {
+                               inode->i_mode = mode;
                                if (ret == 0)
                                        acl = NULL;
-
-                               ret = ocfs2_acl_set_mode(inode, di_bh,
-                                                        handle, mode);
-                               if (ret)
-                                       return ret;
-
                        }
                }
                break;
@@ -293,30 +232,12 @@ static int ocfs2_set_acl(handle_t *handle,
 
 int ocfs2_check_acl(struct inode *inode, int mask)
 {
-       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-       struct buffer_head *di_bh = NULL;
-       struct posix_acl *acl;
-       int ret = -EAGAIN;
-
-       if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
-               return ret;
-
-       ret = ocfs2_read_inode_block(inode, &di_bh);
-       if (ret < 0) {
-               mlog_errno(ret);
-               return ret;
-       }
-
-       acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh);
+       struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS);
 
-       brelse(di_bh);
-
-       if (IS_ERR(acl)) {
-               mlog_errno(PTR_ERR(acl));
+       if (IS_ERR(acl))
                return PTR_ERR(acl);
-       }
        if (acl) {
-               ret = posix_acl_permission(inode, acl, mask);
+               int ret = posix_acl_permission(inode, acl, mask);
                posix_acl_release(acl);
                return ret;
        }
@@ -365,8 +286,7 @@ int ocfs2_init_acl(handle_t *handle,
 {
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct posix_acl *acl = NULL;
-       int ret = 0, ret2;
-       mode_t mode;
+       int ret = 0;
 
        if (!S_ISLNK(inode->i_mode)) {
                if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
@@ -375,17 +295,12 @@ int ocfs2_init_acl(handle_t *handle,
                        if (IS_ERR(acl))
                                return PTR_ERR(acl);
                }
-               if (!acl) {
-                       mode = inode->i_mode & ~current_umask();
-                       ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
-                       if (ret) {
-                               mlog_errno(ret);
-                               goto cleanup;
-                       }
-               }
+               if (!acl)
+                       inode->i_mode &= ~current_umask();
        }
        if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
                struct posix_acl *clone;
+               mode_t mode;
 
                if (S_ISDIR(inode->i_mode)) {
                        ret = ocfs2_set_acl(handle, inode, di_bh,
@@ -402,12 +317,7 @@ int ocfs2_init_acl(handle_t *handle,
                mode = inode->i_mode;
                ret = posix_acl_create_masq(clone, &mode);
                if (ret >= 0) {
-                       ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
-                       if (ret2) {
-                               mlog_errno(ret2);
-                               ret = ret2;
-                               goto cleanup;
-                       }
+                       inode->i_mode = mode;
                        if (ret > 0) {
                                ret = ocfs2_set_acl(handle, inode,
                                                    di_bh, ACL_TYPE_ACCESS,
index 5661db139ca0501c12edd4396a00fb386641b31f..38a42f5d59ff70345b7a8f5840000dc850ea6cff 100644 (file)
@@ -1765,9 +1765,9 @@ set_and_inc:
  *
  * The array index of the subtree root is passed back.
  */
-int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
-                           struct ocfs2_path *left,
-                           struct ocfs2_path *right)
+static int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
+                                  struct ocfs2_path *left,
+                                  struct ocfs2_path *right)
 {
        int i = 0;
 
@@ -2872,8 +2872,8 @@ out:
  * This looks similar, but is subtly different to
  * ocfs2_find_cpos_for_left_leaf().
  */
-int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
-                                  struct ocfs2_path *path, u32 *cpos)
+static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
+                                         struct ocfs2_path *path, u32 *cpos)
 {
        int i, j, ret = 0;
        u64 blkno;
index 1db4359ccb90b61521a0167be7581eb91e823d83..9c122d574464284ef5c998e53f3685939c76213b 100644 (file)
@@ -317,9 +317,4 @@ int ocfs2_path_bh_journal_access(handle_t *handle,
 int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
                              handle_t *handle,
                              struct ocfs2_path *path);
-int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
-                                  struct ocfs2_path *path, u32 *cpos);
-int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
-                           struct ocfs2_path *left,
-                           struct ocfs2_path *right);
 #endif /* OCFS2_ALLOC_H */
index 5fc918ca25722038e19ebaa1c9880cbd72ef5cb1..deb2b132ae5ed42b68fd11f58413f2ffa4779b83 100644 (file)
@@ -591,9 +591,8 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
                goto bail;
        }
 
-       /* We should already CoW the refcounted extent in case of create. */
-       BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));
-
+       /* We should already CoW the refcounted extent. */
+       BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
        /*
         * get_more_blocks() expects us to describe a hole by clearing
         * the mapped bit on bh_result().
index 5a253bab122cfa541173ea9cb1c2821df42ca192..d43d34a1dd31aa3225673cdbcaf7ed07620a0388 100644 (file)
@@ -407,7 +407,6 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
                                struct buffer_head *bh)
 {
        int ret = 0;
-       struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
 
        mlog_entry_void();
 
@@ -427,7 +426,6 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
 
        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
-       ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
        submit_bh(WRITE, bh);
 
        wait_on_buffer(bh);
index 18bc101d603f8176ced2683936f50d8abea0183a..02bf17808bdc50d1c8f72aa8f8a13664bed5a146 100644 (file)
@@ -205,7 +205,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
        if ((count + *ppos) > i_size_read(inode))
                readlen = i_size_read(inode) - *ppos;
        else
-               readlen = count;
+               readlen = count - *ppos;
 
        lvb_buf = kmalloc(readlen, GFP_NOFS);
        if (!lvb_buf)
index ef1ac9ab4ee1a8504da627d037b63a74c5d20302..83bcaf266b358d7922998b3ec1d46d203531d616 100644 (file)
@@ -511,6 +511,8 @@ static void dlm_lockres_release(struct kref *kref)
 
        atomic_dec(&dlm->res_cur_count);
 
+       dlm_put(dlm);
+
        if (!hlist_unhashed(&res->hash_node) ||
            !list_empty(&res->granted) ||
            !list_empty(&res->converting) ||
@@ -583,6 +585,8 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
        res->migration_pending = 0;
        res->inflight_locks = 0;
 
+       /* put in dlm_lockres_release */
+       dlm_grab(dlm);
        res->dlm = dlm;
 
        kref_init(&res->refs);
@@ -3042,6 +3046,8 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
        /* check for pre-existing lock */
        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
+       spin_lock(&dlm->master_lock);
+
        if (res) {
                spin_lock(&res->spinlock);
                if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -3059,15 +3065,14 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
                spin_unlock(&res->spinlock);
        }
 
-       spin_lock(&dlm->master_lock);
        /* ignore status.  only nonzero status would BUG. */
        ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
                                    name, namelen,
                                    migrate->new_master,
                                    migrate->master);
 
-       spin_unlock(&dlm->master_lock);
 unlock:
+       spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
 
        if (oldmle) {
index 3492550b70c8d2d834ac38a7acd02d24a25a6f3a..d9fa3d22e17c6d27134327d3fd96199038913ed0 100644 (file)
@@ -1941,8 +1941,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
        struct list_head *queue;
        struct dlm_lock *lock, *next;
 
-       assert_spin_locked(&dlm->spinlock);
-       assert_spin_locked(&res->spinlock);
        res->state |= DLM_LOCK_RES_RECOVERING;
        if (!list_empty(&res->recovering)) {
                mlog(0,
@@ -2267,15 +2265,19 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
                        /* zero the lvb if necessary */
                        dlm_revalidate_lvb(dlm, res, dead_node);
                        if (res->owner == dead_node) {
-                               if (res->state & DLM_LOCK_RES_DROPPING_REF) {
-                                       mlog(ML_NOTICE, "Ignore %.*s for "
-                                            "recovery as it is being freed\n",
-                                            res->lockname.len,
-                                            res->lockname.name);
-                               } else
-                                       dlm_move_lockres_to_recovery_list(dlm,
-                                                                         res);
+                               if (res->state & DLM_LOCK_RES_DROPPING_REF)
+                                       mlog(0, "%s:%.*s: owned by "
+                                            "dead node %u, this node was "
+                                            "dropping its ref when it died. "
+                                            "continue, dropping the flag.\n",
+                                            dlm->name, res->lockname.len,
+                                            res->lockname.name, dead_node);
+
+                               /* the wake_up for this will happen when the
+                                * RECOVERING flag is dropped later */
+                               res->state &= ~DLM_LOCK_RES_DROPPING_REF;
 
+                               dlm_move_lockres_to_recovery_list(dlm, res);
                        } else if (res->owner == dlm->node_num) {
                                dlm_free_dead_locks(dlm, res, dead_node);
                                __dlm_lockres_calc_usage(dlm, res);
index 86491f583ea56f7dc6e62f3b580334601bbfc27a..52ec020ea78b42f11ad83b3a2632dbcf08a20dde 100644 (file)
@@ -93,27 +93,19 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
  * truly ready to be freed. */
 int __dlm_lockres_unused(struct dlm_lock_resource *res)
 {
-       int bit;
-
-       if (__dlm_lockres_has_locks(res))
-               return 0;
-
-       if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
-               return 0;
-
-       if (res->state & DLM_LOCK_RES_RECOVERING)
-               return 0;
-
-       bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
-       if (bit < O2NM_MAX_NODES)
-               return 0;
-
-       /*
-        * since the bit for dlm->node_num is not set, inflight_locks better
-        * be zero
-        */
-       BUG_ON(res->inflight_locks != 0);
-       return 1;
+       if (!__dlm_lockres_has_locks(res) &&
+           (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
+               /* try not to scan the bitmap unless the first two
+                * conditions are already true */
+               int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+               if (bit >= O2NM_MAX_NODES) {
+                       /* since the bit for dlm->node_num is not
+                        * set, inflight_locks better be zero */
+                       BUG_ON(res->inflight_locks != 0);
+                       return 1;
+               }
+       }
+       return 0;
 }
 
 
@@ -161,25 +153,45 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
        spin_unlock(&dlm->spinlock);
 }
 
-static void dlm_purge_lockres(struct dlm_ctxt *dlm,
+static int dlm_purge_lockres(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res)
 {
        int master;
        int ret = 0;
 
-       assert_spin_locked(&dlm->spinlock);
-       assert_spin_locked(&res->spinlock);
+       spin_lock(&res->spinlock);
+       if (!__dlm_lockres_unused(res)) {
+               mlog(0, "%s:%.*s: tried to purge but not unused\n",
+                    dlm->name, res->lockname.len, res->lockname.name);
+               __dlm_print_one_lock_resource(res);
+               spin_unlock(&res->spinlock);
+               BUG();
+       }
+
+       if (res->state & DLM_LOCK_RES_MIGRATING) {
+               mlog(0, "%s:%.*s: Delay dropref as this lockres is "
+                    "being remastered\n", dlm->name, res->lockname.len,
+                    res->lockname.name);
+               /* Re-add the lockres to the end of the purge list */
+               if (!list_empty(&res->purge)) {
+                       list_del_init(&res->purge);
+                       list_add_tail(&res->purge, &dlm->purge_list);
+               }
+               spin_unlock(&res->spinlock);
+               return 0;
+       }
 
        master = (res->owner == dlm->node_num);
 
+       if (!master)
+               res->state |= DLM_LOCK_RES_DROPPING_REF;
+       spin_unlock(&res->spinlock);
 
        mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
             res->lockname.name, master);
 
        if (!master) {
-               res->state |= DLM_LOCK_RES_DROPPING_REF;
                /* drop spinlock...  retake below */
-               spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);
 
                spin_lock(&res->spinlock);
@@ -197,35 +209,31 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
                mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
                     dlm->name, res->lockname.len, res->lockname.name, ret);
                spin_lock(&dlm->spinlock);
-               spin_lock(&res->spinlock);
        }
 
+       spin_lock(&res->spinlock);
        if (!list_empty(&res->purge)) {
                mlog(0, "removing lockres %.*s:%p from purgelist, "
                     "master = %d\n", res->lockname.len, res->lockname.name,
                     res, master);
                list_del_init(&res->purge);
+               spin_unlock(&res->spinlock);
                dlm_lockres_put(res);
                dlm->purge_count--;
-       }
-
-       if (!__dlm_lockres_unused(res)) {
-               mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
-                    dlm->name, res->lockname.len, res->lockname.name);
-               __dlm_print_one_lock_resource(res);
-               BUG();
-       }
+       } else
+               spin_unlock(&res->spinlock);
 
        __dlm_unhash_lockres(res);
 
        /* lockres is not in the hash now.  drop the flag and wake up
         * any processes waiting in dlm_get_lock_resource. */
        if (!master) {
+               spin_lock(&res->spinlock);
                res->state &= ~DLM_LOCK_RES_DROPPING_REF;
                spin_unlock(&res->spinlock);
                wake_up(&res->wq);
-       } else
-               spin_unlock(&res->spinlock);
+       }
+       return 0;
 }
 
 static void dlm_run_purge_list(struct dlm_ctxt *dlm,
@@ -244,7 +252,17 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
                lockres = list_entry(dlm->purge_list.next,
                                     struct dlm_lock_resource, purge);
 
+               /* Status of the lockres *might* change so double
+                * check. If the lockres is unused, holding the dlm
+                * spinlock will prevent people from getting and more
+                * refs on it -- there's no need to keep the lockres
+                * spinlock. */
                spin_lock(&lockres->spinlock);
+               unused = __dlm_lockres_unused(lockres);
+               spin_unlock(&lockres->spinlock);
+
+               if (!unused)
+                       continue;
 
                purge_jiffies = lockres->last_used +
                        msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
@@ -256,29 +274,15 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
                         * in tail order, we can stop at the first
                         * unpurgable resource -- anyone added after
                         * him will have a greater last_used value */
-                       spin_unlock(&lockres->spinlock);
                        break;
                }
 
-               /* Status of the lockres *might* change so double
-                * check. If the lockres is unused, holding the dlm
-                * spinlock will prevent people from getting and more
-                * refs on it. */
-               unused = __dlm_lockres_unused(lockres);
-               if (!unused ||
-                   (lockres->state & DLM_LOCK_RES_MIGRATING)) {
-                       mlog(0, "lockres %s:%.*s: is in use or "
-                            "being remastered, used %d, state %d\n",
-                            dlm->name, lockres->lockname.len,
-                            lockres->lockname.name, !unused, lockres->state);
-                       list_move_tail(&dlm->purge_list, &lockres->purge);
-                       spin_unlock(&lockres->spinlock);
-                       continue;
-               }
-
                dlm_lockres_get(lockres);
 
-               dlm_purge_lockres(dlm, lockres);
+               /* This may drop and reacquire the dlm spinlock if it
+                * has to do migration. */
+               if (dlm_purge_lockres(dlm, lockres))
+                       BUG();
 
                dlm_lockres_put(lockres);
 
index 3fcb47959d886bf6acac096cc653194f16aba243..0297fb8982b861afdae1d974e9fdae96bf0a1f13 100644 (file)
@@ -485,11 +485,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
                                                     OCFS2_BH_IGNORE_CACHE);
        } else {
                status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
-               /*
-                * If buffer is in jbd, then its checksum may not have been
-                * computed as yet.
-                */
-               if (!status && !buffer_jbd(bh))
+               if (!status)
                        status = ocfs2_validate_inode_block(osb->sb, bh);
        }
        if (status < 0) {
@@ -563,7 +559,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
-                       handle = NULL;
                        mlog_errno(status);
                        goto out;
                }
index b5cb3ede9408944d99abc7c7c3b28c01b1ddadeb..544ac6245175f26c23249cf916c5001ae8f5d067 100644 (file)
@@ -133,7 +133,7 @@ int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
+       if (__mandatory_lock(inode))
                return -ENOLCK;
 
        return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl);
index 10e9527dda1f98cf0b0d5e0fa2c4610610990973..3a0df7a1b8109666b92dea616c1b45ce57a2add9 100644 (file)
@@ -968,103 +968,6 @@ out:
        return 0;
 }
 
-/*
- * Find the end range for a leaf refcount block indicated by
- * el->l_recs[index].e_blkno.
- */
-static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
-                                      struct buffer_head *ref_root_bh,
-                                      struct ocfs2_extent_block *eb,
-                                      struct ocfs2_extent_list *el,
-                                      int index,  u32 *cpos_end)
-{
-       int ret, i, subtree_root;
-       u32 cpos;
-       u64 blkno;
-       struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
-       struct ocfs2_path *left_path = NULL, *right_path = NULL;
-       struct ocfs2_extent_tree et;
-       struct ocfs2_extent_list *tmp_el;
-
-       if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
-               /*
-                * We have a extent rec after index, so just use the e_cpos
-                * of the next extent rec.
-                */
-               *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
-               return 0;
-       }
-
-       if (!eb || (eb && !eb->h_next_leaf_blk)) {
-               /*
-                * We are the last extent rec, so any high cpos should
-                * be stored in this leaf refcount block.
-                */
-               *cpos_end = UINT_MAX;
-               return 0;
-       }
-
-       /*
-        * If the extent block isn't the last one, we have to find
-        * the subtree root between this extent block and the next
-        * leaf extent block and get the corresponding e_cpos from
-        * the subroot. Otherwise we may corrupt the b-tree.
-        */
-       ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
-
-       left_path = ocfs2_new_path_from_et(&et);
-       if (!left_path) {
-               ret = -ENOMEM;
-               mlog_errno(ret);
-               goto out;
-       }
-
-       cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
-       ret = ocfs2_find_path(ci, left_path, cpos);
-       if (ret) {
-               mlog_errno(ret);
-               goto out;
-       }
-
-       right_path = ocfs2_new_path_from_path(left_path);
-       if (!right_path) {
-               ret = -ENOMEM;
-               mlog_errno(ret);
-               goto out;
-       }
-
-       ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
-       if (ret) {
-               mlog_errno(ret);
-               goto out;
-       }
-
-       ret = ocfs2_find_path(ci, right_path, cpos);
-       if (ret) {
-               mlog_errno(ret);
-               goto out;
-       }
-
-       subtree_root = ocfs2_find_subtree_root(&et, left_path,
-                                              right_path);
-
-       tmp_el = left_path->p_node[subtree_root].el;
-       blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
-       for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) {
-               if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
-                       *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
-                       break;
-               }
-       }
-
-       BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec));
-
-out:
-       ocfs2_free_path(left_path);
-       ocfs2_free_path(right_path);
-       return ret;
-}
-
 /*
  * Given a cpos and len, try to find the refcount record which contains cpos.
  * 1. If cpos can be found in one refcount record, return the record.
@@ -1080,10 +983,10 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
                                  struct buffer_head **ret_bh)
 {
        int ret = 0, i, found;
-       u32 low_cpos, uninitialized_var(cpos_end);
+       u32 low_cpos;
        struct ocfs2_extent_list *el;
-       struct ocfs2_extent_rec *rec = NULL;
-       struct ocfs2_extent_block *eb = NULL;
+       struct ocfs2_extent_rec *tmp, *rec = NULL;
+       struct ocfs2_extent_block *eb;
        struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        struct ocfs2_refcount_block *rb =
@@ -1131,16 +1034,12 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
                }
        }
 
-       if (found) {
-               ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
-                                                 eb, el, i, &cpos_end);
-               if (ret) {
-                       mlog_errno(ret);
-                       goto out;
-               }
+       /* adjust len when we have ocfs2_extent_rec after it. */
+       if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) {
+               tmp = &el->l_recs[i+1];
 
-               if (cpos_end < low_cpos + len)
-                       len = cpos_end - low_cpos;
+               if (le32_to_cpu(tmp->e_cpos) < cpos + len)
+                       len = le32_to_cpu(tmp->e_cpos) - cpos;
        }
 
        ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
@@ -2454,26 +2353,16 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
                len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
                          le32_to_cpu(rec.r_clusters)) - cpos;
                /*
+                * If the refcount rec already exist, cool. We just need
+                * to check whether there is a split. Otherwise we just need
+                * to increase the refcount.
+                * If we will insert one, increases recs_add.
+                *
                 * We record all the records which will be inserted to the
                 * same refcount block, so that we can tell exactly whether
                 * we need a new refcount block or not.
-                *
-                * If we will insert a new one, this is easy and only happens
-                * during adding refcounted flag to the extent, so we don't
-                * have a chance of spliting. We just need one record.
-                *
-                * If the refcount rec already exists, that would be a little
-                * complicated. we may have to:
-                * 1) split at the beginning if the start pos isn't aligned.
-                *    we need 1 more record in this case.
-                * 2) split int the end if the end pos isn't aligned.
-                *    we need 1 more record in this case.
-                * 3) split in the middle because of file system fragmentation.
-                *    we need 2 more records in this case(we can't detect this
-                *    beforehand, so always think of the worst case).
                 */
                if (rec.r_refcount) {
-                       recs_add += 2;
                        /* Check whether we need a split at the beginning. */
                        if (cpos == start_cpos &&
                            cpos != le64_to_cpu(rec.r_cpos))
@@ -4106,9 +3995,6 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
        di->i_attr = s_di->i_attr;
 
        if (preserve) {
-               t_inode->i_uid = s_inode->i_uid;
-               t_inode->i_gid = s_inode->i_gid;
-               t_inode->i_mode = s_inode->i_mode;
                di->i_uid = s_di->i_uid;
                di->i_gid = s_di->i_gid;
                di->i_mode = s_di->i_mode;
index 79b5dacf9312b0b5d616c915b09c00bdb92bbeb6..c30b644d9572408816154db8c91c275d3136a0ac 100644 (file)
@@ -152,7 +152,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
 
 #define do_error(fmt, ...)                                             \
        do{                                                             \
-               if (resize)                                     \
+               if (clean_error)                                        \
                        mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__);        \
                else                                                    \
                        ocfs2_error(sb, fmt, ##__VA_ARGS__);            \
@@ -160,7 +160,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
 
 static int ocfs2_validate_gd_self(struct super_block *sb,
                                  struct buffer_head *bh,
-                                 int resize)
+                                 int clean_error)
 {
        struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
 
@@ -211,7 +211,7 @@ static int ocfs2_validate_gd_self(struct super_block *sb,
 static int ocfs2_validate_gd_parent(struct super_block *sb,
                                    struct ocfs2_dinode *di,
                                    struct buffer_head *bh,
-                                   int resize)
+                                   int clean_error)
 {
        unsigned int max_bits;
        struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
@@ -233,11 +233,8 @@ static int ocfs2_validate_gd_parent(struct super_block *sb,
                return -EINVAL;
        }
 
-       /* In resize, we may meet the case bg_chain == cl_next_free_rec. */
-       if ((le16_to_cpu(gd->bg_chain) >
-            le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
-           ((le16_to_cpu(gd->bg_chain) ==
-            le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
+       if (le16_to_cpu(gd->bg_chain) >=
+           le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
                do_error("Group descriptor #%llu has bad chain %u",
                         (unsigned long long)bh->b_blocknr,
                         le16_to_cpu(gd->bg_chain));
index 9f55be445bd647355e930b5636de531032dacd2f..14f47d2bfe02eb6500666e8a63a227a2d4ee10f2 100644 (file)
@@ -701,10 +701,6 @@ unlock_osb:
 
                if (!ocfs2_is_hard_readonly(osb))
                        ocfs2_set_journal_params(osb);
-
-               sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-                       ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ?
-                                                       MS_POSIXACL : 0);
        }
 out:
        unlock_kernel();
index 91e656fd528b82e74bda7520ee8c113da52dda70..e3421030a69f713971a78e7d5d525098a4e44416 100644 (file)
@@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
        }
 
        /* Fast symlinks can't be large */
-       len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
+       len = strlen(target);
        link = kzalloc(len + 1, GFP_NOFS);
        if (!link) {
                status = -ENOMEM;
index bae725b7febd3019772b21e97e16832c06875d80..fc71aab084603749abc8138ee860c2f17faa05fa 100644 (file)
@@ -74,7 +74,6 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
        } *label;
        unsigned char *data;
        Sector sect;
-       sector_t labelsect;
 
        res = 0;
        blocksize = bdev_logical_block_size(bdev);
@@ -98,20 +97,10 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
            ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
                goto out_freeall;
 
-       /*
-        * Special case for FBA disks: label sector does not depend on
-        * blocksize.
-        */
-       if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
-           (info->cu_type == 0x3880 && info->dev_type == 0x3370))
-               labelsect = info->label_block;
-       else
-               labelsect = info->label_block * (blocksize >> 9);
-
        /*
         * Get volume label, extract name and type.
         */
-       data = read_dev_sector(bdev, labelsect, &sect);
+       data = read_dev_sector(bdev, info->label_block*(blocksize/512), &sect);
        if (data == NULL)
                goto out_readerr;
 
index 90be97f1f5a8c8ce5cf737c3e52020e0e516a291..0028d2ef0662b0f01fd21b7685754d9939ebbd51 100644 (file)
  */
 #include <asm/unaligned.h>
 
-#define SYS_IND(p)     get_unaligned(&p->sys_ind)
+#define SYS_IND(p)     (get_unaligned(&p->sys_ind))
+#define NR_SECTS(p)    ({ __le32 __a = get_unaligned(&p->nr_sects);    \
+                               le32_to_cpu(__a); \
+                       })
 
-static inline sector_t nr_sects(struct partition *p)
-{
-       return (sector_t)get_unaligned_le32(&p->nr_sects);
-}
-
-static inline sector_t start_sect(struct partition *p)
-{
-       return (sector_t)get_unaligned_le32(&p->start_sect);
-}
+#define START_SECT(p)  ({ __le32 __a = get_unaligned(&p->start_sect);  \
+                               le32_to_cpu(__a); \
+                       })
 
 static inline int is_extended_partition(struct partition *p)
 {
@@ -107,13 +104,13 @@ static int aix_magic_present(unsigned char *p, struct block_device *bdev)
 
 static void
 parse_extended(struct parsed_partitions *state, struct block_device *bdev,
-                       sector_t first_sector, sector_t first_size)
+                       u32 first_sector, u32 first_size)
 {
        struct partition *p;
        Sector sect;
        unsigned char *data;
-       sector_t this_sector, this_size;
-       sector_t sector_size = bdev_logical_block_size(bdev) / 512;
+       u32 this_sector, this_size;
+       int sector_size = bdev_logical_block_size(bdev) / 512;
        int loopct = 0;         /* number of links followed
                                   without finding a data partition */
        int i;
@@ -148,14 +145,14 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
                 * First process the data partition(s)
                 */
                for (i=0; i<4; i++, p++) {
-                       sector_t offs, size, next;
-                       if (!nr_sects(p) || is_extended_partition(p))
+                       u32 offs, size, next;
+                       if (!NR_SECTS(p) || is_extended_partition(p))
                                continue;
 
                        /* Check the 3rd and 4th entries -
                           these sometimes contain random garbage */
-                       offs = start_sect(p)*sector_size;
-                       size = nr_sects(p)*sector_size;
+                       offs = START_SECT(p)*sector_size;
+                       size = NR_SECTS(p)*sector_size;
                        next = this_sector + offs;
                        if (i >= 2) {
                                if (offs + size > this_size)
@@ -182,13 +179,13 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
                 */
                p -= 4;
                for (i=0; i<4; i++, p++)
-                       if (nr_sects(p) && is_extended_partition(p))
+                       if (NR_SECTS(p) && is_extended_partition(p))
                                break;
                if (i == 4)
                        goto done;       /* nothing left to do */
 
-               this_sector = first_sector + start_sect(p) * sector_size;
-               this_size = nr_sects(p) * sector_size;
+               this_sector = first_sector + START_SECT(p) * sector_size;
+               this_size = NR_SECTS(p) * sector_size;
                put_dev_sector(sect);
        }
 done:
@@ -200,7 +197,7 @@ done:
 
 static void
 parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
-                       sector_t offset, sector_t size, int origin)
+                       u32 offset, u32 size, int origin)
 {
 #ifdef CONFIG_SOLARIS_X86_PARTITION
        Sector sect;
@@ -247,7 +244,7 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
  */
 static void
 parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
-               sector_t offset, sector_t size, int origin, char *flavour,
+               u32 offset, u32 size, int origin, char *flavour,
                int max_partitions)
 {
        Sector sect;
@@ -266,7 +263,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
        if (le16_to_cpu(l->d_npartitions) < max_partitions)
                max_partitions = le16_to_cpu(l->d_npartitions);
        for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) {
-               sector_t bsd_start, bsd_size;
+               u32 bsd_start, bsd_size;
 
                if (state->next == state->limit)
                        break;
@@ -293,7 +290,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
 
 static void
 parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
-               sector_t offset, sector_t size, int origin)
+               u32 offset, u32 size, int origin)
 {
 #ifdef CONFIG_BSD_DISKLABEL
        parse_bsd(state, bdev, offset, size, origin,
@@ -303,7 +300,7 @@ parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
 
 static void
 parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
-               sector_t offset, sector_t size, int origin)
+               u32 offset, u32 size, int origin)
 {
 #ifdef CONFIG_BSD_DISKLABEL
        parse_bsd(state, bdev, offset, size, origin,
@@ -313,7 +310,7 @@ parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
 
 static void
 parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
-               sector_t offset, sector_t size, int origin)
+               u32 offset, u32 size, int origin)
 {
 #ifdef CONFIG_BSD_DISKLABEL
        parse_bsd(state, bdev, offset, size, origin,
@@ -327,7 +324,7 @@ parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
  */
 static void
 parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
-               sector_t offset, sector_t size, int origin)
+               u32 offset, u32 size, int origin)
 {
 #ifdef CONFIG_UNIXWARE_DISKLABEL
        Sector sect;
@@ -351,8 +348,7 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
 
                if (p->s_label != UNIXWARE_FS_UNUSED)
                        put_partition(state, state->next++,
-                                     le32_to_cpu(p->start_sect),
-                                     le32_to_cpu(p->nr_sects));
+                                               START_SECT(p), NR_SECTS(p));
                p++;
        }
        put_dev_sector(sect);
@@ -367,7 +363,7 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
  */
 static void
 parse_minix(struct parsed_partitions *state, struct block_device *bdev,
-               sector_t offset, sector_t size, int origin)
+               u32 offset, u32 size, int origin)
 {
 #ifdef CONFIG_MINIX_SUBPARTITION
        Sector sect;
@@ -394,7 +390,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
                        /* add each partition in use */
                        if (SYS_IND(p) == MINIX_PARTITION)
                                put_partition(state, state->next++,
-                                             start_sect(p), nr_sects(p));
+                                             START_SECT(p), NR_SECTS(p));
                }
                printk(" >\n");
        }
@@ -405,7 +401,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
 static struct {
        unsigned char id;
        void (*parse)(struct parsed_partitions *, struct block_device *,
-                       sector_t, sector_t, int);
+                       u32, u32, int);
 } subtypes[] = {
        {FREEBSD_PARTITION, parse_freebsd},
        {NETBSD_PARTITION, parse_netbsd},
@@ -419,7 +415,7 @@ static struct {
  
 int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
 {
-       sector_t sector_size = bdev_logical_block_size(bdev) / 512;
+       int sector_size = bdev_logical_block_size(bdev) / 512;
        Sector sect;
        unsigned char *data;
        struct partition *p;
@@ -487,21 +483,14 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
 
        state->next = 5;
        for (slot = 1 ; slot <= 4 ; slot++, p++) {
-               sector_t start = start_sect(p)*sector_size;
-               sector_t size = nr_sects(p)*sector_size;
+               u32 start = START_SECT(p)*sector_size;
+               u32 size = NR_SECTS(p)*sector_size;
                if (!size)
                        continue;
                if (is_extended_partition(p)) {
-                       /*
-                        * prevent someone doing mkfs or mkswap on an
-                        * extended partition, but leave room for LILO
-                        * FIXME: this uses one logical sector for > 512b
-                        * sector, although it may not be enough/proper.
-                        */
-                       sector_t n = 2;
-                       n = min(size, max(sector_size, n));
-                       put_partition(state, slot, start, n);
-
+                       /* prevent someone doing mkfs or mkswap on an
+                          extended partition, but leave room for LILO */
+                       put_partition(state, slot, start, size == 1 ? 1 : 2);
                        printk(" <");
                        parse_extended(state, bdev, start, size);
                        printk(" >");
@@ -524,7 +513,7 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
                unsigned char id = SYS_IND(p);
                int n;
 
-               if (!nr_sects(p))
+               if (!NR_SECTS(p))
                        continue;
 
                for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++)
@@ -532,8 +521,8 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
 
                if (!subtypes[n].parse)
                        continue;
-               subtypes[n].parse(state, bdev, start_sect(p)*sector_size,
-                                               nr_sects(p)*sector_size, slot);
+               subtypes[n].parse(state, bdev, START_SECT(p)*sector_size,
+                                               NR_SECTS(p)*sector_size, slot);
        }
        put_dev_sector(sect);
        return 1;
index d0cc080c732b7abba788ac24913c8175449876db..ae17d026aaa3f496fc0bf9d8522f52d027d8dadb 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -363,7 +363,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
                        error = ops->confirm(pipe, buf);
                        if (error) {
                                if (!ret)
-                                       ret = error;
+                                       error = ret;
                                break;
                        }
 
index 42fdc765f8170caa3c5a0608b9539d086e175eed..822c2d5065189906cd6a63bedd27875290964e04 100644 (file)
@@ -82,6 +82,7 @@
 #include <linux/pid_namespace.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
+#include <linux/swapops.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -321,6 +322,94 @@ static inline void task_context_switch_counts(struct seq_file *m,
                        p->nivcsw);
 }
 
+#ifdef CONFIG_MMU
+
+struct stack_stats {
+       struct vm_area_struct *vma;
+       unsigned long   startpage;
+       unsigned long   usage;
+};
+
+static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
+                               unsigned long end, struct mm_walk *walk)
+{
+       struct stack_stats *ss = walk->private;
+       struct vm_area_struct *vma = ss->vma;
+       pte_t *pte, ptent;
+       spinlock_t *ptl;
+       int ret = 0;
+
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
+               ptent = *pte;
+
+#ifdef CONFIG_STACK_GROWSUP
+               if (pte_present(ptent) || is_swap_pte(ptent))
+                       ss->usage = addr - ss->startpage + PAGE_SIZE;
+#else
+               if (pte_present(ptent) || is_swap_pte(ptent)) {
+                       ss->usage = ss->startpage - addr + PAGE_SIZE;
+                       pte++;
+                       ret = 1;
+                       break;
+               }
+#endif
+       }
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
+       return ret;
+}
+
+static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
+                               struct task_struct *task)
+{
+       struct stack_stats ss;
+       struct mm_walk stack_walk = {
+               .pmd_entry = stack_usage_pte_range,
+               .mm = vma->vm_mm,
+               .private = &ss,
+       };
+
+       if (!vma->vm_mm || is_vm_hugetlb_page(vma))
+               return 0;
+
+       ss.vma = vma;
+       ss.startpage = task->stack_start & PAGE_MASK;
+       ss.usage = 0;
+
+#ifdef CONFIG_STACK_GROWSUP
+       walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
+               &stack_walk);
+#else
+       walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
+               &stack_walk);
+#endif
+       return ss.usage;
+}
+
+static inline void task_show_stack_usage(struct seq_file *m,
+                                               struct task_struct *task)
+{
+       struct vm_area_struct   *vma;
+       struct mm_struct        *mm = get_task_mm(task);
+
+       if (mm) {
+               down_read(&mm->mmap_sem);
+               vma = find_vma(mm, task->stack_start);
+               if (vma)
+                       seq_printf(m, "Stack usage:\t%lu kB\n",
+                               get_stack_usage_in_bytes(vma, task) >> 10);
+
+               up_read(&mm->mmap_sem);
+               mmput(mm);
+       }
+}
+#else
+static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
+{
+}
+#endif         /* CONFIG_MMU */
+
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
 {
@@ -340,6 +429,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
        task_show_regs(m, task);
 #endif
        task_context_switch_counts(m, task);
+       task_show_stack_usage(m, task);
        return 0;
 }
 
@@ -405,6 +495,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
                /* add up live thread stats at the group level */
                if (whole) {
+                       struct task_cputime cputime;
                        struct task_struct *t = task;
                        do {
                                min_flt += t->min_flt;
@@ -415,7 +506,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
                        min_flt += sig->min_flt;
                        maj_flt += sig->maj_flt;
-                       thread_group_times(task, &utime, &stime);
+                       thread_group_cputime(task, &cputime);
+                       utime = cputime.utime;
+                       stime = cputime.stime;
                        gtime = cputime_add(gtime, sig->gtime);
                }
 
@@ -478,7 +571,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                rsslim,
                mm ? mm->start_code : 0,
                mm ? mm->end_code : 0,
-               (permitted && mm) ? mm->start_stack : 0,
+               (permitted && mm) ? task->stack_start : 0,
                esp,
                eip,
                /* The signal information here is obsolete.
index 8ea4f9bc5236a8d3c0acb9ff5417e3f3587f9987..d7367affea2ac13b21f483849f57920735ddb1a5 100644 (file)
@@ -449,13 +449,12 @@ static const struct file_operations proc_lstats_operations = {
 unsigned long badness(struct task_struct *p, unsigned long uptime);
 static int proc_oom_score(struct task_struct *task, char *buffer)
 {
-       unsigned long points = 0;
+       unsigned long points;
        struct timespec uptime;
 
        do_posix_clock_monotonic_gettime(&uptime);
        read_lock(&tasklist_lock);
-       if (pid_alive(task))
-               points = badness(task, uptime.tv_sec);
+       points = badness(task->group_leader, uptime.tv_sec);
        read_unlock(&tasklist_lock);
        return sprintf(buffer, "%lu\n", points);
 }
@@ -2339,30 +2338,16 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        struct pid_namespace *ns = dentry->d_sb->s_fs_info;
        pid_t tgid = task_tgid_nr_ns(current, ns);
-       char *name = ERR_PTR(-ENOENT);
-       if (tgid) {
-               name = __getname();
-               if (!name)
-                       name = ERR_PTR(-ENOMEM);
-               else
-                       sprintf(name, "%d", tgid);
-       }
-       nd_set_link(nd, name);
-       return NULL;
-}
-
-static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
-                               void *cookie)
-{
-       char *s = nd_get_link(nd);
-       if (!IS_ERR(s))
-               __putname(s);
+       char tmp[PROC_NUMBUF];
+       if (!tgid)
+               return ERR_PTR(-ENOENT);
+       sprintf(tmp, "%d", task_tgid_nr_ns(current, ns));
+       return ERR_PTR(vfs_follow_link(nd,tmp));
 }
 
 static const struct inode_operations proc_self_inode_operations = {
        .readlink       = proc_self_readlink,
        .follow_link    = proc_self_follow_link,
-       .put_link       = proc_self_put_link,
 };
 
 /*
@@ -2878,7 +2863,7 @@ out_no_task:
  */
 static const struct pid_entry tid_base_stuff[] = {
        DIR("fd",        S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
-       DIR("fdinfo",    S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
+       DIR("fdinfo",    S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations),
        REG("environ",   S_IRUSR, proc_environ_operations),
        INF("auxv",      S_IRUSR, proc_pid_auxv),
        ONE("status",    S_IRUGO, proc_pid_status),
index e085035a00546a266b3d839970c4561c107463b6..8e5fae2f7aed8f7fdeb3c02dd01c2dcddf3509f7 100644 (file)
@@ -206,7 +206,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        unsigned long long pgoff = 0;
-       unsigned long start;
        dev_t dev = 0;
        int len;
 
@@ -217,14 +216,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
        }
 
-       /* We don't show the stack guard page in /proc/maps */
-       start = vma->vm_start;
-       if (vma->vm_flags & VM_GROWSDOWN)
-               if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
-                       start += PAGE_SIZE;
-
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
-                       start,
+                       vma->vm_start,
                        vma->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
@@ -250,6 +243,25 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
                                } else if (vma->vm_start <= mm->start_stack &&
                                           vma->vm_end >= mm->start_stack) {
                                        name = "[stack]";
+                               } else {
+                                       unsigned long stack_start;
+                                       struct proc_maps_private *pmp;
+
+                                       pmp = m->private;
+                                       stack_start = pmp->task->stack_start;
+
+                                       if (vma->vm_start <= stack_start &&
+                                           vma->vm_end >= stack_start) {
+                                               pad_len_spaces(m, len);
+                                               seq_printf(m,
+                                                "[threadstack:%08lx]",
+#ifdef CONFIG_STACK_GROWSUP
+                                                vma->vm_end - stack_start
+#else
+                                                stack_start - vma->vm_start
+#endif
+                                               );
+                                       }
                                }
                        } else {
                                name = "[vdso]";
index 4fdb0eb88184866bf45c92734daf8845844a9525..253498739978f16937d0bce16306f378c8f33a3e 100644 (file)
@@ -229,8 +229,6 @@ static struct hlist_head *dquot_hash;
 struct dqstats dqstats;
 EXPORT_SYMBOL(dqstats);
 
-static qsize_t inode_get_rsv_space(struct inode *inode);
-
 static inline unsigned int
 hashfn(const struct super_block *sb, unsigned int id, int type)
 {
@@ -822,14 +820,11 @@ static int dqinit_needed(struct inode *inode, int type)
 static void add_dquot_ref(struct super_block *sb, int type)
 {
        struct inode *inode, *old_inode = NULL;
-       int reserved = 0;
 
        spin_lock(&inode_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
                        continue;
-               if (unlikely(inode_get_rsv_space(inode) > 0))
-                       reserved = 1;
                if (!atomic_read(&inode->i_writecount))
                        continue;
                if (!dqinit_needed(inode, type))
@@ -850,12 +845,6 @@ static void add_dquot_ref(struct super_block *sb, int type)
        }
        spin_unlock(&inode_lock);
        iput(old_inode);
-
-       if (reserved) {
-               printk(KERN_WARNING "VFS (%s): Writes happened before quota"
-                       " was turned on thus quota information is probably "
-                       "inconsistent. Please run quotacheck(8).\n", sb->s_id);
-       }
 }
 
 /*
@@ -969,12 +958,10 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
 /*
  * Claim reserved quota space
  */
-static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
+static void dquot_claim_reserved_space(struct dquot *dquot,
+                                               qsize_t number)
 {
-       if (dquot->dq_dqb.dqb_rsvspace < number) {
-               WARN_ON_ONCE(1);
-               number = dquot->dq_dqb.dqb_rsvspace;
-       }
+       WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
        dquot->dq_dqb.dqb_curspace += number;
        dquot->dq_dqb.dqb_rsvspace -= number;
 }
@@ -982,12 +969,7 @@ static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
 static inline
 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
 {
-       if (dquot->dq_dqb.dqb_rsvspace >= number)
-               dquot->dq_dqb.dqb_rsvspace -= number;
-       else {
-               WARN_ON_ONCE(1);
-               dquot->dq_dqb.dqb_rsvspace = 0;
-       }
+       dquot->dq_dqb.dqb_rsvspace -= number;
 }
 
 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
@@ -1305,7 +1287,6 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
                return QUOTA_NL_BHARDBELOW;
        return QUOTA_NL_NOWARN;
 }
-
 /*
  *     Initialize quota pointers in inode
  *     We do things in a bit complicated way but by that we avoid calling
@@ -1317,7 +1298,6 @@ int dquot_initialize(struct inode *inode, int type)
        int cnt, ret = 0;
        struct dquot *got[MAXQUOTAS] = { NULL, NULL };
        struct super_block *sb = inode->i_sb;
-       qsize_t rsv;
 
        /* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
@@ -1352,13 +1332,6 @@ int dquot_initialize(struct inode *inode, int type)
                if (!inode->i_dquot[cnt]) {
                        inode->i_dquot[cnt] = got[cnt];
                        got[cnt] = NULL;
-                       /*
-                        * Make quota reservation system happy if someone
-                        * did a write before quota was turned on
-                        */
-                       rsv = inode_get_rsv_space(inode);
-                       if (unlikely(rsv))
-                               dquot_resv_space(inode->i_dquot[cnt], rsv);
                }
        }
 out_err:
@@ -1426,30 +1399,28 @@ static qsize_t *inode_reserved_space(struct inode * inode)
        return inode->i_sb->dq_op->get_reserved_space(inode);
 }
 
-void inode_add_rsv_space(struct inode *inode, qsize_t number)
+static void inode_add_rsv_space(struct inode *inode, qsize_t number)
 {
        spin_lock(&inode->i_lock);
        *inode_reserved_space(inode) += number;
        spin_unlock(&inode->i_lock);
 }
-EXPORT_SYMBOL(inode_add_rsv_space);
 
-void inode_claim_rsv_space(struct inode *inode, qsize_t number)
+
+static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
 {
        spin_lock(&inode->i_lock);
        *inode_reserved_space(inode) -= number;
        __inode_add_bytes(inode, number);
        spin_unlock(&inode->i_lock);
 }
-EXPORT_SYMBOL(inode_claim_rsv_space);
 
-void inode_sub_rsv_space(struct inode *inode, qsize_t number)
+static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
 {
        spin_lock(&inode->i_lock);
        *inode_reserved_space(inode) -= number;
        spin_unlock(&inode->i_lock);
 }
-EXPORT_SYMBOL(inode_sub_rsv_space);
 
 static qsize_t inode_get_rsv_space(struct inode *inode)
 {
@@ -2389,34 +2360,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
        if (di->dqb_valid & QIF_SPACE) {
                dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
                check_blim = 1;
-               set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
+               __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        }
        if (di->dqb_valid & QIF_BLIMITS) {
                dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
                dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
                check_blim = 1;
-               set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
+               __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        }
        if (di->dqb_valid & QIF_INODES) {
                dm->dqb_curinodes = di->dqb_curinodes;
                check_ilim = 1;
-               set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
+               __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        }
        if (di->dqb_valid & QIF_ILIMITS) {
                dm->dqb_isoftlimit = di->dqb_isoftlimit;
                dm->dqb_ihardlimit = di->dqb_ihardlimit;
                check_ilim = 1;
-               set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
+               __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        }
        if (di->dqb_valid & QIF_BTIME) {
                dm->dqb_btime = di->dqb_btime;
                check_blim = 1;
-               set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
+               __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        }
        if (di->dqb_valid & QIF_ITIME) {
                dm->dqb_itime = di->dqb_itime;
                check_ilim = 1;
-               set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
+               __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        }
 
        if (check_blim) {
index d42c30ceaee53e9e720ff733b4286473b63cb6c3..6d2668fdc3848eb5b2be29d027c5634c727148f6 100644 (file)
@@ -45,6 +45,8 @@ static inline bool is_privroot_deh(struct dentry *dir,
                                   struct reiserfs_de_head *deh)
 {
        struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
+       if (reiserfs_expose_privroot(dir->d_sb))
+               return 0;
        return (dir == dir->d_parent && privroot->d_inode &&
                deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
 }
index b5fe0aa033e7148a877d1bb7bf170f068c6f6f40..90622200b39c0622e0f159d423c929a036d76257 100644 (file)
@@ -2184,15 +2184,6 @@ static int journal_read_transaction(struct super_block *sb,
                brelse(d_bh);
                return 1;
        }
-
-       if (bdev_read_only(sb->s_bdev)) {
-               reiserfs_warning(sb, "clm-2076",
-                                "device is readonly, unable to replay log");
-               brelse(c_bh);
-               brelse(d_bh);
-               return -EROFS;
-       }
-
        trans_id = get_desc_trans_id(desc);
        /* now we know we've got a good transaction, and it was inside the valid time ranges */
        log_blocks = kmalloc(get_desc_trans_len(desc) *
@@ -2431,6 +2422,12 @@ static int journal_read(struct super_block *sb)
                goto start_log_replay;
        }
 
+       if (continue_replay && bdev_read_only(sb->s_bdev)) {
+               reiserfs_warning(sb, "clm-2076",
+                                "device is readonly, unable to replay log");
+               return -1;
+       }
+
        /* ok, there are transactions that need to be replayed.  start with the first log block, find
         ** all the valid transactions, and pick out the oldest.
         */
index cc1caa26be52d149906f06403741dee43a598cfb..6925b835a43b6f2f94e8a4217180b2e8d1735ab7 100644 (file)
@@ -536,7 +536,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
        if (!err && new_size < i_size_read(dentry->d_inode)) {
                struct iattr newattrs = {
                        .ia_ctime = current_fs_time(inode->i_sb),
-                       .ia_size = new_size,
+                       .ia_size = buffer_size,
                        .ia_valid = ATTR_SIZE | ATTR_CTIME,
                };
                mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
@@ -952,13 +952,21 @@ int reiserfs_permission(struct inode *inode, int mask)
        return generic_permission(inode, mask, NULL);
 }
 
-static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
+/* This will catch lookups from the fs root to .reiserfs_priv */
+static int
+xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
 {
-       return -EPERM;
+       struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
+       if (container_of(q1, struct dentry, d_name) == priv_root)
+               return -ENOENT;
+       if (q1->len == name->len &&
+                  !memcmp(q1->name, name->name, name->len))
+               return 0;
+       return 1;
 }
 
 static const struct dentry_operations xattr_lookup_poison_ops = {
-       .d_revalidate = xattr_hide_revalidate,
+       .d_compare = xattr_lookup_poison,
 };
 
 int reiserfs_lookup_privroot(struct super_block *s)
@@ -972,7 +980,8 @@ int reiserfs_lookup_privroot(struct super_block *s)
                                strlen(PRIVROOT_NAME));
        if (!IS_ERR(dentry)) {
                REISERFS_SB(s)->priv_root = dentry;
-               dentry->d_op = &xattr_lookup_poison_ops;
+               if (!reiserfs_expose_privroot(s))
+                       s->s_root->d_op = &xattr_lookup_poison_ops;
                if (dentry->d_inode)
                        dentry->d_inode->i_flags |= S_PRIVATE;
        } else
index b37b13b93d89f719b43e26e9c9eb4b1f092b2e1a..a92c8792c0f6d82d95fe00b8b3acbdc54802fd80 100644 (file)
@@ -75,7 +75,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode,
                return error;
        }
 
-       if (sec->length && reiserfs_xattrs_initialized(inode->i_sb)) {
+       if (sec->length) {
                blocks = reiserfs_xattr_jcreate_nblocks(inode) +
                         reiserfs_xattr_nblocks(inode, sec->length);
                /* We don't want to count the directories twice if we have
index d98bea8865c15b3c122c3e99d740c241330034a8..b07565c9438672015aa041e3fb57e4b01ca6e27c 100644 (file)
@@ -87,7 +87,6 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
                 err |= __put_user(kinfo->si_tid, &uinfo->ssi_tid);
                 err |= __put_user(kinfo->si_overrun, &uinfo->ssi_overrun);
                 err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
-                err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
                break;
        case __SI_POLL:
                err |= __put_user(kinfo->si_band, &uinfo->ssi_band);
@@ -111,7 +110,6 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
                err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
                err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
                err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
-               err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
                break;
        default:
                /*
index e5efbb96d2bdad48ebb4266198fd3b9f5fa7f629..7394e9e17534ecb03573e28b2288d06cccf24a32 100644 (file)
@@ -365,7 +365,17 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
                 * If the page isn't uptodate, we may need to start io on it
                 */
                if (!PageUptodate(page)) {
-                       lock_page(page);
+                       /*
+                        * If in nonblock mode then dont block on waiting
+                        * for an in-flight io page
+                        */
+                       if (flags & SPLICE_F_NONBLOCK) {
+                               if (!trylock_page(page)) {
+                                       error = -EAGAIN;
+                                       break;
+                               }
+                       } else
+                               lock_page(page);
 
                        /*
                         * Page was truncated, or invalidated by the
index 7118a383a87a2623fe0250958c133d2ad385b801..f5ea4680f15fdca8a009f66a979bd6d3805244f6 100644 (file)
@@ -340,7 +340,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
        char *p;
 
        p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
-       if (!IS_ERR(p))
+       if (p)
                memmove(last_sysfs_file, p, strlen(p) + 1);
 
        /* need attr_sd for attr and ops, its parent for kobj */
index 4c7b14503716d3e01bfee8d48ab930f6fdbd008a..b23a5450644608fddbc7d5146e6c20f638209450 100644 (file)
@@ -250,9 +250,8 @@ xfs_set_mode(struct inode *inode, mode_t mode)
        if (mode != inode->i_mode) {
                struct iattr iattr;
 
-               iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
+               iattr.ia_valid = ATTR_MODE;
                iattr.ia_mode = mode;
-               iattr.ia_ctime = current_fs_time(inode->i_sb);
 
                error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
        }
index 7263002fac643f6da9df6ea561ac3cb36054908a..c2e30eea74dc2cd6344dd6710806465b029db422 100644 (file)
@@ -204,17 +204,14 @@ xfs_ioend_new_eof(
 }
 
 /*
- * Update on-disk file size now that data has been written to disk.  The
- * current in-memory file size is i_size.  If a write is beyond eof i_new_size
- * will be the intended file size until i_size is updated.  If this write does
- * not extend all the way to the valid file size then restrict this update to
- * the end of the write.
- *
- * This function does not block as blocking on the inode lock in IO completion
- * can lead to IO completion order dependency deadlocks.. If it can't get the
- * inode ilock it will return EAGAIN. Callers must handle this.
+ * Update on-disk file size now that data has been written to disk.
+ * The current in-memory file size is i_size.  If a write is beyond
+ * eof i_new_size will be the intended file size until i_size is
+ * updated.  If this write does not extend all the way to the valid
+ * file size then restrict this update to the end of the write.
  */
-STATIC int
+
+STATIC void
 xfs_setfilesize(
        xfs_ioend_t             *ioend)
 {
@@ -225,11 +222,9 @@ xfs_setfilesize(
        ASSERT(ioend->io_type != IOMAP_READ);
 
        if (unlikely(ioend->io_error))
-               return 0;
-
-       if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
-               return EAGAIN;
+               return;
 
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
        isize = xfs_ioend_new_eof(ioend);
        if (isize) {
                ip->i_d.di_size = isize;
@@ -237,28 +232,6 @@ xfs_setfilesize(
        }
 
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
-       return 0;
-}
-
-/*
- * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend. If we are asked to wait,
- * flush the workqueue.
- */
-STATIC void
-xfs_finish_ioend(
-       xfs_ioend_t     *ioend,
-       int             wait)
-{
-       if (atomic_dec_and_test(&ioend->io_remaining)) {
-               struct workqueue_struct *wq;
-
-               wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
-                       xfsconvertd_workqueue : xfsdatad_workqueue;
-               queue_work(wq, &ioend->io_work);
-               if (wait)
-                       flush_workqueue(wq);
-       }
 }
 
 /*
@@ -270,23 +243,9 @@ xfs_end_bio_delalloc(
 {
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);
-       int                     error;
 
-       /*
-        * If we didn't complete processing of the ioend, requeue it to the
-        * tail of the workqueue for another attempt later. Otherwise destroy
-        * it.
-        */
-       error = xfs_setfilesize(ioend);
-       if (error == EAGAIN) {
-               atomic_inc(&ioend->io_remaining);
-               xfs_finish_ioend(ioend, 0);
-               /* ensure we don't spin on blocked ioends */
-               delay(1);
-       } else {
-               ASSERT(!error);
-               xfs_destroy_ioend(ioend);
-       }
+       xfs_setfilesize(ioend);
+       xfs_destroy_ioend(ioend);
 }
 
 /*
@@ -298,23 +257,9 @@ xfs_end_bio_written(
 {
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);
-       int                     error;
 
-       /*
-        * If we didn't complete processing of the ioend, requeue it to the
-        * tail of the workqueue for another attempt later. Otherwise destroy
-        * it.
-        */
-       error = xfs_setfilesize(ioend);
-       if (error == EAGAIN) {
-               atomic_inc(&ioend->io_remaining);
-               xfs_finish_ioend(ioend, 0);
-               /* ensure we don't spin on blocked ioends */
-               delay(1);
-       } else {
-               ASSERT(!error);
-               xfs_destroy_ioend(ioend);
-       }
+       xfs_setfilesize(ioend);
+       xfs_destroy_ioend(ioend);
 }
 
 /*
@@ -334,25 +279,13 @@ xfs_end_bio_unwritten(
        size_t                  size = ioend->io_size;
 
        if (likely(!ioend->io_error)) {
-               int     error;
                if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+                       int error;
                        error = xfs_iomap_write_unwritten(ip, offset, size);
                        if (error)
                                ioend->io_error = error;
                }
-               /*
-                * If we didn't complete processing of the ioend, requeue it to the
-                * tail of the workqueue for another attempt later. Otherwise destroy
-                * it.
-                */
-               error = xfs_setfilesize(ioend);
-               if (error == EAGAIN) {
-                       atomic_inc(&ioend->io_remaining);
-                       xfs_finish_ioend(ioend, 0);
-                       /* ensure we don't spin on blocked ioends */
-                       delay(1);
-                       return;
-               }
+               xfs_setfilesize(ioend);
        }
        xfs_destroy_ioend(ioend);
 }
@@ -370,6 +303,27 @@ xfs_end_bio_read(
        xfs_destroy_ioend(ioend);
 }
 
+/*
+ * Schedule IO completion handling on a xfsdatad if this was
+ * the final hold on this ioend. If we are asked to wait,
+ * flush the workqueue.
+ */
+STATIC void
+xfs_finish_ioend(
+       xfs_ioend_t     *ioend,
+       int             wait)
+{
+       if (atomic_dec_and_test(&ioend->io_remaining)) {
+               struct workqueue_struct *wq = xfsdatad_workqueue;
+               if (ioend->io_work.func == xfs_end_bio_unwritten)
+                       wq = xfsconvertd_workqueue;
+
+               queue_work(wq, &ioend->io_work);
+               if (wait)
+                       flush_workqueue(wq);
+       }
+}
+
 /*
  * Allocate and initialise an IO completion structure.
  * We need to track unwritten extent write completion here initially.
index 98fe0db674bcad47a7d40ab673a870f7a7ea9aba..5bb523d7f37e9263399444379f9ac16451b4dd62 100644 (file)
@@ -789,8 +789,6 @@ xfs_ioc_fsgetxattr(
 {
        struct fsxattr          fa;
 
-       memset(&fa, 0, sizeof(struct fsxattr));
-
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        fa.fsx_xflags = xfs_ip2xflags(ip);
        fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
index 1f3b4b8f7dd45195b71e41f6ea4df2a6a0b110c9..cd42ef78f6b54e705ad604e3ccfc4eb1a3df3743 100644 (file)
@@ -573,8 +573,8 @@ xfs_vn_fallocate(
        bf.l_len = len;
 
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
-       error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
-                                      0, XFS_ATTR_NOLOCK);
+       error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
+                                     0, XFS_ATTR_NOLOCK);
        if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
            offset + len > i_size_read(inode))
                new_size = offset + len;
@@ -585,7 +585,7 @@ xfs_vn_fallocate(
 
                iattr.ia_valid = ATTR_SIZE;
                iattr.ia_size = new_size;
-               error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
+               error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
        }
 
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
index d95bfa27c62a745cf9c2e7921c070dcfeeb3a6d0..18a4b8e11df2d4241bcfafd59297c30e961241ad 100644 (file)
@@ -930,37 +930,13 @@ xfs_fs_alloc_inode(
  */
 STATIC void
 xfs_fs_destroy_inode(
-       struct inode            *inode)
+       struct inode    *inode)
 {
-       struct xfs_inode        *ip = XFS_I(inode);
-
-       xfs_itrace_entry(ip);
+       xfs_inode_t             *ip = XFS_I(inode);
 
        XFS_STATS_INC(vn_reclaim);
-
-       /* bad inode, get out here ASAP */
-       if (is_bad_inode(inode))
-               goto out_reclaim;
-
-       xfs_ioend_wait(ip);
-
-       ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
-
-       /*
-        * We should never get here with one of the reclaim flags already set.
-        */
-       ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
-       ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
-
-       /*
-        * We always use background reclaim here because even if the
-        * inode is clean, it still may be under IO and hence we have
-        * to take the flush lock. The background reclaim path handles
-        * this more efficiently than we can here, so simply let background
-        * reclaim tear down all inodes.
-        */
-out_reclaim:
-       xfs_inode_set_reclaim_tag(ip);
+       if (xfs_reclaim(ip))
+               panic("%s: cannot reclaim 0x%p\n", __func__, inode);
 }
 
 /*
@@ -1164,7 +1140,6 @@ xfs_fs_put_super(
 
        xfs_unmountfs(mp);
        xfs_freesb(mp);
-       xfs_inode_shrinker_unregister(mp);
        xfs_icsb_destroy_counters(mp);
        xfs_close_devices(mp);
        xfs_dmops_put(mp);
@@ -1324,8 +1299,6 @@ xfs_fs_remount(
 
        /* ro -> rw */
        if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
-               __uint64_t resblks;
-
                mp->m_flags &= ~XFS_MOUNT_RDONLY;
                if (mp->m_flags & XFS_MOUNT_BARRIER)
                        xfs_mountfs_check_barriers(mp);
@@ -1343,37 +1316,11 @@ xfs_fs_remount(
                        }
                        mp->m_update_flags = 0;
                }
-
-               /*
-                * Fill out the reserve pool if it is empty. Use the stashed
-                * value if it is non-zero, otherwise go with the default.
-                */
-               if (mp->m_resblks_save) {
-                       resblks = mp->m_resblks_save;
-                       mp->m_resblks_save = 0;
-               } else {
-                       resblks = mp->m_sb.sb_dblocks;
-                       do_div(resblks, 20);
-                       resblks = min_t(__uint64_t, resblks, 1024);
-               }
-               xfs_reserve_blocks(mp, &resblks, NULL);
        }
 
        /* rw -> ro */
        if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
-               /*
-                * After we have synced the data but before we sync the
-                * metadata, we need to free up the reserve block pool so that
-                * the used block count in the superblock on disk is correct at
-                * the end of the remount. Stash the current reserve pool size
-                * so that if we get remounted rw, we can return it to the same
-                * size.
-                */
-               __uint64_t resblks = 0;
-
                xfs_quiesce_data(mp);
-               mp->m_resblks_save = mp->m_resblks;
-               xfs_reserve_blocks(mp, &resblks, NULL);
                xfs_quiesce_attr(mp);
                mp->m_flags |= XFS_MOUNT_RDONLY;
        }
@@ -1556,8 +1503,6 @@ xfs_fs_fill_super(
        if (error)
                goto fail_vnrele;
 
-       xfs_inode_shrinker_register(mp);
-
        kfree(mtpt);
 
        xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
@@ -1897,7 +1842,6 @@ init_xfs_fs(void)
                goto out_cleanup_procfs;
 
        vfs_initquota();
-       xfs_inode_shrinker_init();
 
        error = register_filesystem(&xfs_fs_type);
        if (error)
@@ -1927,7 +1871,6 @@ exit_xfs_fs(void)
 {
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
-       xfs_inode_shrinker_destroy();
        xfs_sysctl_unregister();
        xfs_cleanup_procfs();
        xfs_buf_terminate();
index c82683ad1484aea5d1fa1636619c25d51667707c..961df0a22c7837e9a969a8ab73331c594d5a6116 100644 (file)
@@ -64,6 +64,7 @@ xfs_inode_ag_lookup(
         * as the tree is sparse and a gang lookup walks to find
         * the number of objects requested.
         */
+       read_lock(&pag->pag_ici_lock);
        if (tag == XFS_ICI_NO_TAG) {
                nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
                                (void **)&ip, *first_index, 1);
@@ -72,7 +73,7 @@ xfs_inode_ag_lookup(
                                (void **)&ip, *first_index, 1, tag);
        }
        if (!nr_found)
-               return NULL;
+               goto unlock;
 
        /*
         * Update the index for the next lookup. Catch overflows
@@ -82,8 +83,13 @@ xfs_inode_ag_lookup(
         */
        *first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
        if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
-               return NULL;
+               goto unlock;
+
        return ip;
+
+unlock:
+       read_unlock(&pag->pag_ici_lock);
+       return NULL;
 }
 
 STATIC int
@@ -93,9 +99,7 @@ xfs_inode_ag_walk(
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags,
-       int                     tag,
-       int                     exclusive,
-       int                     *nr_to_scan)
+       int                     tag)
 {
        struct xfs_perag        *pag = &mp->m_perag[ag];
        uint32_t                first_index;
@@ -109,20 +113,10 @@ restart:
                int             error = 0;
                xfs_inode_t     *ip;
 
-               if (exclusive)
-                       write_lock(&pag->pag_ici_lock);
-               else
-                       read_lock(&pag->pag_ici_lock);
                ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
-               if (!ip) {
-                       if (exclusive)
-                               write_unlock(&pag->pag_ici_lock);
-                       else
-                               read_unlock(&pag->pag_ici_lock);
+               if (!ip)
                        break;
-               }
 
-               /* execute releases pag->pag_ici_lock */
                error = execute(ip, pag, flags);
                if (error == EAGAIN) {
                        skipped++;
@@ -130,12 +124,13 @@ restart:
                }
                if (error)
                        last_error = error;
-
-               /* bail out if the filesystem is corrupted.  */
+               /*
+                * bail out if the filesystem is corrupted.
+                */
                if (error == EFSCORRUPTED)
                        break;
 
-       } while ((*nr_to_scan)--);
+       } while (1);
 
        if (skipped) {
                delay(1);
@@ -152,31 +147,22 @@ xfs_inode_ag_iterator(
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags,
-       int                     tag,
-       int                     exclusive,
-       int                     *nr_to_scan)
+       int                     tag)
 {
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
-       int                     nr;
 
-       nr = nr_to_scan ? *nr_to_scan : INT_MAX;
        for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
                if (!mp->m_perag[ag].pag_ici_init)
                        continue;
-               error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
-                                               exclusive, &nr);
+               error = xfs_inode_ag_walk(mp, ag, execute, flags, tag);
                if (error) {
                        last_error = error;
                        if (error == EFSCORRUPTED)
                                break;
                }
-               if (nr <= 0)
-                       break;
        }
-       if (nr_to_scan)
-               *nr_to_scan = nr;
        return XFS_ERROR(last_error);
 }
 
@@ -187,31 +173,30 @@ xfs_sync_inode_valid(
        struct xfs_perag        *pag)
 {
        struct inode            *inode = VFS_I(ip);
-       int                     error = EFSCORRUPTED;
 
        /* nothing to sync during shutdown */
-       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
-               goto out_unlock;
-
-       /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
-       error = ENOENT;
-       if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
-               goto out_unlock;
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+               read_unlock(&pag->pag_ici_lock);
+               return EFSCORRUPTED;
+       }
 
-       /* If we can't grab the inode, it must on it's way to reclaim. */
-       if (!igrab(inode))
-               goto out_unlock;
+       /*
+        * If we can't get a reference on the inode, it must be in reclaim.
+        * Leave it for the reclaim code to flush. Also avoid inodes that
+        * haven't been fully initialised.
+        */
+       if (!igrab(inode)) {
+               read_unlock(&pag->pag_ici_lock);
+               return ENOENT;
+       }
+       read_unlock(&pag->pag_ici_lock);
 
-       if (is_bad_inode(inode)) {
+       if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) {
                IRELE(ip);
-               goto out_unlock;
+               return ENOENT;
        }
 
-       /* inode is valid */
-       error = 0;
-out_unlock:
-       read_unlock(&pag->pag_ici_lock);
-       return error;
+       return 0;
 }
 
 STATIC int
@@ -296,7 +281,7 @@ xfs_sync_data(
        ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
 
        error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
-                                     XFS_ICI_NO_TAG, 0, NULL);
+                                     XFS_ICI_NO_TAG);
        if (error)
                return XFS_ERROR(error);
 
@@ -318,7 +303,7 @@ xfs_sync_attr(
        ASSERT((flags & ~SYNC_WAIT) == 0);
 
        return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
-                                    XFS_ICI_NO_TAG, 0, NULL);
+                                    XFS_ICI_NO_TAG);
 }
 
 STATIC int
@@ -678,71 +663,35 @@ xfs_syncd_stop(
        kthread_stop(mp->m_sync_task);
 }
 
-void
-__xfs_inode_set_reclaim_tag(
-       struct xfs_perag        *pag,
-       struct xfs_inode        *ip)
-{
-       radix_tree_tag_set(&pag->pag_ici_root,
-                          XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
-                          XFS_ICI_RECLAIM_TAG);
-       pag->pag_ici_reclaimable++;
-}
-
-/*
- * We set the inode flag atomically with the radix tree tag.
- * Once we get tag lookups on the radix tree, this inode flag
- * can go away.
- */
-void
-xfs_inode_set_reclaim_tag(
-       xfs_inode_t     *ip)
-{
-       xfs_mount_t     *mp = ip->i_mount;
-       xfs_perag_t     *pag = xfs_get_perag(mp, ip->i_ino);
-
-       write_lock(&pag->pag_ici_lock);
-       spin_lock(&ip->i_flags_lock);
-       __xfs_inode_set_reclaim_tag(pag, ip);
-       __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
-       spin_unlock(&ip->i_flags_lock);
-       write_unlock(&pag->pag_ici_lock);
-       xfs_put_perag(mp, pag);
-}
-
-void
-__xfs_inode_clear_reclaim_tag(
-       xfs_mount_t     *mp,
-       xfs_perag_t     *pag,
-       xfs_inode_t     *ip)
-{
-       radix_tree_tag_clear(&pag->pag_ici_root,
-                       XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
-       pag->pag_ici_reclaimable--;
-}
-
-STATIC int
+int
 xfs_reclaim_inode(
-       struct xfs_inode        *ip,
-       struct xfs_perag        *pag,
-       int                     sync_mode)
+       xfs_inode_t     *ip,
+       int             locked,
+       int             sync_mode)
 {
-       /*
-        * The radix tree lock here protects a thread in xfs_iget from racing
-        * with us starting reclaim on the inode.  Once we have the
-        * XFS_IRECLAIM flag set it will not touch us.
+       xfs_perag_t     *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
+
+       /* The hash lock here protects a thread in xfs_iget_core from
+        * racing with us on linking the inode back with a vnode.
+        * Once we have the XFS_IRECLAIM flag set it will not touch
+        * us.
         */
+       write_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);
-       ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
-       if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
-               /* ignore as it is already under reclaim */
+       if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
+           !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
                spin_unlock(&ip->i_flags_lock);
                write_unlock(&pag->pag_ici_lock);
-               return 0;
+               if (locked) {
+                       xfs_ifunlock(ip);
+                       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+               }
+               return -EAGAIN;
        }
        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        write_unlock(&pag->pag_ici_lock);
+       xfs_put_perag(ip->i_mount, pag);
 
        /*
         * If the inode is still dirty, then flush it out.  If the inode
@@ -755,8 +704,10 @@ xfs_reclaim_inode(
         * We get the flush lock regardless, though, just to make sure
         * we don't free it while it is being flushed.
         */
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_iflock(ip);
+       if (!locked) {
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
+               xfs_iflock(ip);
+       }
 
        /*
         * In the case of a forced shutdown we rely on xfs_iflush() to
@@ -773,94 +724,68 @@ xfs_reclaim_inode(
        return 0;
 }
 
-int
-xfs_reclaim_inodes(
-       xfs_mount_t     *mp,
-       int             mode)
+void
+__xfs_inode_set_reclaim_tag(
+       struct xfs_perag        *pag,
+       struct xfs_inode        *ip)
 {
-       return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
-                                       XFS_ICI_RECLAIM_TAG, 1, NULL);
+       radix_tree_tag_set(&pag->pag_ici_root,
+                          XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
+                          XFS_ICI_RECLAIM_TAG);
 }
 
 /*
- * Shrinker infrastructure.
- *
- * This is all far more complex than it needs to be. It adds a global list of
- * mounts because the shrinkers can only call a global context. We need to make
- * the shrinkers pass a context to avoid the need for global state.
+ * We set the inode flag atomically with the radix tree tag.
+ * Once we get tag lookups on the radix tree, this inode flag
+ * can go away.
  */
-static LIST_HEAD(xfs_mount_list);
-static struct rw_semaphore xfs_mount_list_lock;
-
-static int
-xfs_reclaim_inode_shrink(
-       int             nr_to_scan,
-       gfp_t           gfp_mask)
+void
+xfs_inode_set_reclaim_tag(
+       xfs_inode_t     *ip)
 {
-       struct xfs_mount *mp;
-       xfs_agnumber_t  ag;
-       int             reclaimable = 0;
-
-       if (nr_to_scan) {
-               if (!(gfp_mask & __GFP_FS))
-                       return -1;
-
-               down_read(&xfs_mount_list_lock);
-               list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-                       xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
-                                       XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
-                       if (nr_to_scan <= 0)
-                               break;
-               }
-               up_read(&xfs_mount_list_lock);
-       }
-
-       down_read(&xfs_mount_list_lock);
-       list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-               for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-
-                       if (!mp->m_perag[ag].pag_ici_init)
-                               continue;
-                       reclaimable += mp->m_perag[ag].pag_ici_reclaimable;
-               }
-       }
-       up_read(&xfs_mount_list_lock);
-       return reclaimable;
-}
-
-static struct shrinker xfs_inode_shrinker = {
-       .shrink = xfs_reclaim_inode_shrink,
-       .seeks = DEFAULT_SEEKS,
-};
+       xfs_mount_t     *mp = ip->i_mount;
+       xfs_perag_t     *pag = xfs_get_perag(mp, ip->i_ino);
 
-void __init
-xfs_inode_shrinker_init(void)
-{
-       init_rwsem(&xfs_mount_list_lock);
-       register_shrinker(&xfs_inode_shrinker);
+       read_lock(&pag->pag_ici_lock);
+       spin_lock(&ip->i_flags_lock);
+       __xfs_inode_set_reclaim_tag(pag, ip);
+       __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
+       spin_unlock(&ip->i_flags_lock);
+       read_unlock(&pag->pag_ici_lock);
+       xfs_put_perag(mp, pag);
 }
 
 void
-xfs_inode_shrinker_destroy(void)
+__xfs_inode_clear_reclaim_tag(
+       xfs_mount_t     *mp,
+       xfs_perag_t     *pag,
+       xfs_inode_t     *ip)
 {
-       ASSERT(list_empty(&xfs_mount_list));
-       unregister_shrinker(&xfs_inode_shrinker);
+       radix_tree_tag_clear(&pag->pag_ici_root,
+                       XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
 }
 
-void
-xfs_inode_shrinker_register(
-       struct xfs_mount        *mp)
+STATIC int
+xfs_reclaim_inode_now(
+       struct xfs_inode        *ip,
+       struct xfs_perag        *pag,
+       int                     flags)
 {
-       down_write(&xfs_mount_list_lock);
-       list_add_tail(&mp->m_mplist, &xfs_mount_list);
-       up_write(&xfs_mount_list_lock);
+       /* ignore if already under reclaim */
+       if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
+               read_unlock(&pag->pag_ici_lock);
+               return 0;
+       }
+       read_unlock(&pag->pag_ici_lock);
+
+       return xfs_reclaim_inode(ip, 0, flags);
 }
 
-void
-xfs_inode_shrinker_unregister(
-       struct xfs_mount        *mp)
+int
+xfs_reclaim_inodes(
+       xfs_mount_t     *mp,
+       int             mode)
 {
-       down_write(&xfs_mount_list_lock);
-       list_del(&mp->m_mplist);
-       up_write(&xfs_mount_list_lock);
+       return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode,
+                                       XFS_ICI_RECLAIM_TAG);
 }
index 0b28c13bdf9455a6a406b57d4785554e90bdb7c3..27920eb7a820cbe77b2e7799bf441a5da8dd0930 100644 (file)
@@ -44,6 +44,7 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
 
 void xfs_flush_inodes(struct xfs_inode *ip);
 
+int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
 
 void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
@@ -54,11 +55,6 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
 int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
        int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
-       int flags, int tag, int write_lock, int *nr_to_scan);
-
-void xfs_inode_shrinker_init(void);
-void xfs_inode_shrinker_destroy(void);
-void xfs_inode_shrinker_register(struct xfs_mount *mp);
-void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
+       int flags, int tag);
 
 #endif
index 97b410c12794d5ddf52579625d4a256ed384d872..a5346630dfae05d639a0cee2a24cadbaac057cad 100644 (file)
@@ -59,7 +59,7 @@ xfs_fill_statvfs_from_dquot(
                be64_to_cpu(dp->d_blk_hardlimit);
        if (limit && statp->f_blocks > limit) {
                statp->f_blocks = limit;
-               statp->f_bfree = statp->f_bavail =
+               statp->f_bfree =
                        (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
                         (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
        }
index 60fe35821cff740eff6f615a07c21fda93d2941e..5d1a3b98a6e68875a47dc283483a9be376cdce42 100644 (file)
@@ -893,8 +893,7 @@ xfs_qm_dqrele_all_inodes(
        uint             flags)
 {
        ASSERT(mp->m_quotainfo);
-       xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
-                               XFS_ICI_NO_TAG, 0, NULL);
+       xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG);
 }
 
 /*------------------------------------------------------------------------*/
index 381fba77b28d5bdd230fc12453cff08b82bbe0f0..a5d54bf4931b583dfd3729d670cd0694e7b7c507 100644 (file)
@@ -215,7 +215,6 @@ typedef struct xfs_perag
        int             pag_ici_init;   /* incore inode cache initialised */
        rwlock_t        pag_ici_lock;   /* incore inode lock */
        struct radix_tree_root pag_ici_root;    /* incore inode cache root */
-       int             pag_ici_reclaimable;    /* reclaimable inodes */
 #endif
 } xfs_perag_t;
 
index 4cd1c23b77f00a48bb407cbd2061966b7a66c7e4..2cf944eb796daf4ca455865d3dd22c83ee6d13ee 100644 (file)
@@ -2703,35 +2703,45 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
        xfs_mount_t             *mp;
        xfs_perag_busy_t        *bsy;
        xfs_agblock_t           uend, bend;
-       xfs_lsn_t               lsn = 0;
+       xfs_lsn_t               lsn;
        int                     cnt;
 
        mp = tp->t_mountp;
 
        spin_lock(&mp->m_perag[agno].pagb_lock);
+       cnt = mp->m_perag[agno].pagb_count;
+
        uend = bno + len - 1;
 
-       /*
-        * search pagb_list for this slot, skipping open slots. We have to
-        * search the entire array as there may be multiple overlaps and
-        * we have to get the most recent LSN for the log force to push out
-        * all the transactions that span the range.
-        */
-       for (cnt = 0; cnt < mp->m_perag[agno].pagb_count; cnt++) {
-               bsy = &mp->m_perag[agno].pagb_list[cnt];
-               if (!bsy->busy_tp)
-                       continue;
-               bend = bsy->busy_start + bsy->busy_length - 1;
-               if (bno > bend || uend < bsy->busy_start)
-                       continue;
+       /* search pagb_list for this slot, skipping open slots */
+       for (bsy = mp->m_perag[agno].pagb_list; cnt; bsy++) {
 
-               /* (start1,length1) within (start2, length2) */
-               if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
-                       lsn = bsy->busy_tp->t_commit_lsn;
+               /*
+                * (start1,length1) within (start2, length2)
+                */
+               if (bsy->busy_tp != NULL) {
+                       bend = bsy->busy_start + bsy->busy_length - 1;
+                       if ((bno > bend) || (uend < bsy->busy_start)) {
+                               cnt--;
+                       } else {
+                               TRACE_BUSYSEARCH("xfs_alloc_search_busy",
+                                        "found1", agno, bno, len, tp);
+                               break;
+                       }
+               }
        }
-       spin_unlock(&mp->m_perag[agno].pagb_lock);
-       TRACE_BUSYSEARCH("xfs_alloc_search_busy", lsn ? "found" : "not-found",
-                                               agno, bno, len, tp);
-       if (lsn)
+
+       /*
+        * If a block was found, force the log through the LSN of the
+        * transaction that freed the block
+        */
+       if (cnt) {
+               TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp);
+               lsn = bsy->busy_tp->t_commit_lsn;
+               spin_unlock(&mp->m_perag[agno].pagb_lock);
                xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
+       } else {
+               TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp);
+               spin_unlock(&mp->m_perag[agno].pagb_lock);
+       }
 }
index e4ccd9f24829aeef169c845c78cae2ce11b6f899..ab89a7e94a0fd4b861114f656b99e5fcf0355a14 100644 (file)
@@ -62,9 +62,7 @@ xfs_swapext(
                goto out;
        }
 
-       if (!(file->f_mode & FMODE_WRITE) ||
-           !(file->f_mode & FMODE_READ) ||
-           (file->f_flags & O_APPEND)) {
+       if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) {
                error = XFS_ERROR(EBADF);
                goto out_put_file;
        }
@@ -76,7 +74,6 @@ xfs_swapext(
        }
 
        if (!(target_file->f_mode & FMODE_WRITE) ||
-           !(target_file->f_mode & FMODE_READ) ||
            (target_file->f_flags & O_APPEND)) {
                error = XFS_ERROR(EBADF);
                goto out_put_target_file;
@@ -116,82 +113,10 @@ xfs_swapext(
        return error;
 }
 
-/*
- * We need to check that the format of the data fork in the temporary inode is
- * valid for the target inode before doing the swap. This is not a problem with
- * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
- * data fork depending on the space the attribute fork is taking so we can get
- * invalid formats on the target inode.
- *
- * E.g. target has space for 7 extents in extent format, temp inode only has
- * space for 6.  If we defragment down to 7 extents, then the tmp format is a
- * btree, but when swapped it needs to be in extent format. Hence we can't just
- * blindly swap data forks on attr2 filesystems.
- *
- * Note that we check the swap in both directions so that we don't end up with
- * a corrupt temporary inode, either.
- *
- * Note that fixing the way xfs_fsr sets up the attribute fork in the source
- * inode will prevent this situation from occurring, so all we do here is
- * reject and log the attempt. basically we are putting the responsibility on
- * userspace to get this right.
- */
-static int
-xfs_swap_extents_check_format(
-       xfs_inode_t     *ip,    /* target inode */
-       xfs_inode_t     *tip)   /* tmp inode */
-{
-
-       /* Should never get a local format */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
-           tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
-               return EINVAL;
-
-       /*
-        * if the target inode has less extents that then temporary inode then
-        * why did userspace call us?
-        */
-       if (ip->i_d.di_nextents < tip->i_d.di_nextents)
-               return EINVAL;
-
-       /*
-        * if the target inode is in extent form and the temp inode is in btree
-        * form then we will end up with the target inode in the wrong format
-        * as we already know there are less extents in the temp inode.
-        */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
-               return EINVAL;
-
-       /* Check temp in extent form to max in target */
-       if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > ip->i_df.if_ext_max)
-               return EINVAL;
-
-       /* Check target in extent form to max in temp */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max)
-               return EINVAL;
-
-       /* Check root block of temp in btree form to max in target */
-       if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
-           XFS_IFORK_BOFF(ip) &&
-           tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip))
-               return EINVAL;
-
-       /* Check root block of target in btree form to max in temp */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
-           XFS_IFORK_BOFF(tip) &&
-           ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip))
-               return EINVAL;
-
-       return 0;
-}
-
 int
 xfs_swap_extents(
-       xfs_inode_t     *ip,    /* target inode */
-       xfs_inode_t     *tip,   /* tmp inode */
+       xfs_inode_t     *ip,
+       xfs_inode_t     *tip,
        xfs_swapext_t   *sxp)
 {
        xfs_mount_t     *mp;
@@ -235,6 +160,13 @@ xfs_swap_extents(
                goto out_unlock;
        }
 
+       /* Should never get a local format */
+       if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
+           tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+               error = XFS_ERROR(EINVAL);
+               goto out_unlock;
+       }
+
        if (VN_CACHED(VFS_I(tip)) != 0) {
                xfs_inval_cached_trace(tip, 0, -1, 0, -1);
                error = xfs_flushinval_pages(tip, 0, -1,
@@ -257,12 +189,13 @@ xfs_swap_extents(
                goto out_unlock;
        }
 
-       /* check inode formats now that data is flushed */
-       error = xfs_swap_extents_check_format(ip, tip);
-       if (error) {
-               xfs_fs_cmn_err(CE_NOTE, mp,
-                   "%s: inode 0x%llx format is incompatible for exchanging.",
-                               __FILE__, ip->i_ino);
+       /*
+        * If the target has extended attributes, the tmp file
+        * must also in order to ensure the correct data fork
+        * format.
+        */
+       if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) {
+               error = XFS_ERROR(EINVAL);
                goto out_unlock;
        }
 
@@ -342,16 +275,6 @@ xfs_swap_extents(
        *ifp = *tifp;           /* struct copy */
        *tifp = *tempifp;       /* struct copy */
 
-       /*
-        * Fix the in-memory data fork values that are dependent on the fork
-        * offset in the inode. We can't assume they remain the same as attr2
-        * has dynamic fork offsets.
-        */
-       ifp->if_ext_max = XFS_IFORK_SIZE(ip, XFS_DATA_FORK) /
-                                       (uint)sizeof(xfs_bmbt_rec_t);
-       tifp->if_ext_max = XFS_IFORK_SIZE(tip, XFS_DATA_FORK) /
-                                       (uint)sizeof(xfs_bmbt_rec_t);
-
        /*
         * Fix the on-disk inode values
         */
index 6f83f58c099fee2db91d80f6d31c51f3b46f6830..2d0b3e1da9e69094f798967e3ff432d679249f00 100644 (file)
@@ -611,7 +611,7 @@ xfs_fs_log_dummy(
        xfs_inode_t     *ip;
        int             error;
 
-       tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
+       tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
        error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
index a04a72fbbb654f599971d69573cefcfe1999536d..80e526489be5d7ee3dc415ec592fe2807a879a01 100644 (file)
@@ -228,12 +228,13 @@ xfs_iget_cache_hit(
                xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
 
                /*
-                * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
-                * from stomping over us while we recycle the inode.  We can't
-                * clear the radix tree reclaimable tag yet as it requires
-                * pag_ici_lock to be held exclusive.
+                * We need to set XFS_INEW atomically with clearing the
+                * reclaimable tag so that we do have an indicator of the
+                * inode still being initialized.
                 */
-               ip->i_flags |= XFS_IRECLAIM;
+               ip->i_flags |= XFS_INEW;
+               ip->i_flags &= ~XFS_IRECLAIMABLE;
+               __xfs_inode_clear_reclaim_tag(mp, pag, ip);
 
                spin_unlock(&ip->i_flags_lock);
                read_unlock(&pag->pag_ici_lock);
@@ -252,15 +253,7 @@ xfs_iget_cache_hit(
                        __xfs_inode_set_reclaim_tag(pag, ip);
                        goto out_error;
                }
-
-               write_lock(&pag->pag_ici_lock);
-               spin_lock(&ip->i_flags_lock);
-               ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
-               ip->i_flags |= XFS_INEW;
-               __xfs_inode_clear_reclaim_tag(mp, pag, ip);
                inode->i_state = I_LOCK|I_NEW;
-               spin_unlock(&ip->i_flags_lock);
-               write_unlock(&pag->pag_ici_lock);
        } else {
                /* If the VFS inode is being torn down, pause and try again. */
                if (!igrab(inode)) {
@@ -518,21 +511,17 @@ xfs_ireclaim(
 {
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_perag        *pag;
-       xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
 
        XFS_STATS_INC(xs_ig_reclaims);
 
        /*
-        * Remove the inode from the per-AG radix tree.
-        *
-        * Because radix_tree_delete won't complain even if the item was never
-        * added to the tree assert that it's been there before to catch
-        * problems with the inode life time early on.
+        * Remove the inode from the per-AG radix tree.  It doesn't matter
+        * if it was never added to it because radix_tree_delete can deal
+        * with that case just fine.
         */
        pag = xfs_get_perag(mp, ip->i_ino);
        write_lock(&pag->pag_ici_lock);
-       if (!radix_tree_delete(&pag->pag_ici_root, agino))
-               ASSERT(0);
+       radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
        write_unlock(&pag->pag_ici_lock);
        xfs_put_perag(mp, pag);
 
index 523a1ae4964d6efef646469e1398c272cf1b0281..b92a4fa2a0a12eda161c2ff897bd820f5cbde1e8 100644 (file)
@@ -2877,8 +2877,8 @@ xfs_iflush(
        mp = ip->i_mount;
 
        /*
-        * If the inode isn't dirty, then just release the inode flush lock and
-        * do nothing.
+        * If the inode isn't dirty, then just release the inode
+        * flush lock and do nothing.
         */
        if (xfs_inode_clean(ip)) {
                xfs_ifunlock(ip);
@@ -2903,19 +2903,6 @@ xfs_iflush(
        }
        xfs_iunpin_wait(ip);
 
-       /*
-        * For stale inodes we cannot rely on the backing buffer remaining
-        * stale in cache for the remaining life of the stale inode and so
-        * xfs_itobp() below may give us a buffer that no longer contains
-        * inodes below. We have to check this after ensuring the inode is
-        * unpinned so that it is safe to reclaim the stale inode after the
-        * flush call.
-        */
-       if (xfs_iflags_test(ip, XFS_ISTALE)) {
-               xfs_ifunlock(ip);
-               return 0;
-       }
-
        /*
         * This may have been unpinned because the filesystem is shutting
         * down forcibly. If that's the case we must not write this inode
index 7294abce6ef2f4d7d7e7ecae29dd377b14a4a2b3..67ae5555a30a6e8be0bd415b1596bea17854add5 100644 (file)
@@ -860,15 +860,8 @@ xfs_iomap_write_unwritten(
                 * set up a transaction to convert the range of extents
                 * from unwritten to real. Do allocations in a loop until
                 * we have covered the range passed in.
-                *
-                * Note that we open code the transaction allocation here
-                * to pass KM_NOFS--we can't risk to recursing back into
-                * the filesystem here as we might be asked to write out
-                * the same inode that we complete here and might deadlock
-                * on the iolock.
                 */
-               xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
-               tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
+               tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
                tp->t_flags |= XFS_TRANS_RESERVE;
                error = xfs_trans_reserve(tp, resblks,
                                XFS_WRITE_LOG_RES(mp), 0,
index b5b0d80559108a085131f92d3ca8b1ba8a593828..fb17f8226b0955eddc3ee3130708750f72fa3f29 100644 (file)
@@ -3517,7 +3517,7 @@ xlog_do_recovery_pass(
 {
        xlog_rec_header_t       *rhead;
        xfs_daddr_t             blk_no;
-       xfs_caddr_t             offset;
+       xfs_caddr_t             bufaddr, offset;
        xfs_buf_t               *hbp, *dbp;
        int                     error = 0, h_size;
        int                     bblks, split_bblks;
@@ -3610,7 +3610,7 @@ xlog_do_recovery_pass(
                        /*
                         * Check for header wrapping around physical end-of-log
                         */
-                       offset = XFS_BUF_PTR(hbp);
+                       offset = NULL;
                        split_hblks = 0;
                        wrapped_hblks = 0;
                        if (blk_no + hblks <= log->l_logBBsize) {
@@ -3646,8 +3646,9 @@ xlog_do_recovery_pass(
                                 *   - order is important.
                                 */
                                wrapped_hblks = hblks - split_hblks;
+                               bufaddr = XFS_BUF_PTR(hbp);
                                error = XFS_BUF_SET_PTR(hbp,
-                                               offset + BBTOB(split_hblks),
+                                               bufaddr + BBTOB(split_hblks),
                                                BBTOB(hblks - split_hblks));
                                if (error)
                                        goto bread_err2;
@@ -3657,10 +3658,14 @@ xlog_do_recovery_pass(
                                if (error)
                                        goto bread_err2;
 
-                               error = XFS_BUF_SET_PTR(hbp, offset,
+                               error = XFS_BUF_SET_PTR(hbp, bufaddr,
                                                        BBTOB(hblks));
                                if (error)
                                        goto bread_err2;
+
+                               if (!offset)
+                                       offset = xlog_align(log, 0,
+                                                       wrapped_hblks, hbp);
                        }
                        rhead = (xlog_rec_header_t *)offset;
                        error = xlog_valid_rec_header(log, rhead,
@@ -3680,7 +3685,7 @@ xlog_do_recovery_pass(
                        } else {
                                /* This log record is split across the
                                 * physical end of log */
-                               offset = XFS_BUF_PTR(dbp);
+                               offset = NULL;
                                split_bblks = 0;
                                if (blk_no != log->l_logBBsize) {
                                        /* some data is before the physical
@@ -3709,8 +3714,9 @@ xlog_do_recovery_pass(
                                 *   _first_, then the log start (LR header end)
                                 *   - order is important.
                                 */
+                               bufaddr = XFS_BUF_PTR(dbp);
                                error = XFS_BUF_SET_PTR(dbp,
-                                               offset + BBTOB(split_bblks),
+                                               bufaddr + BBTOB(split_bblks),
                                                BBTOB(bblks - split_bblks));
                                if (error)
                                        goto bread_err2;
@@ -3721,9 +3727,13 @@ xlog_do_recovery_pass(
                                if (error)
                                        goto bread_err2;
 
-                               error = XFS_BUF_SET_PTR(dbp, offset, h_size);
+                               error = XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
                                if (error)
                                        goto bread_err2;
+
+                               if (!offset)
+                                       offset = xlog_align(log, wrapped_hblks,
+                                               bblks - split_bblks, dbp);
                        }
                        xlog_unpack_data(rhead, offset, log);
                        if ((error = xlog_recover_process_data(log, rhash,
index 4d509f742bd20aba743310378c8c5a8a2b327363..8b6c9e807efb7b8a47bf493563bd5d8dadde3f4f 100644 (file)
@@ -1471,7 +1471,7 @@ xfs_log_sbcount(
        if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
                return 0;
 
-       tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
+       tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT);
        error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
                                        XFS_DEFAULT_LOG_COUNT);
        if (error) {
index 08fdb6d43efd0cfaffc07716bdbdbd1b21d5d4d5..a6c023bc0fb27191d895578fe910e052840a804a 100644 (file)
@@ -209,7 +209,6 @@ typedef struct xfs_mount {
        __uint64_t              m_maxioffset;   /* maximum inode offset */
        __uint64_t              m_resblks;      /* total reserved blocks */
        __uint64_t              m_resblks_avail;/* available reserved blocks */
-       __uint64_t              m_resblks_save; /* reserved blks @ remount,ro */
        int                     m_dalign;       /* stripe unit */
        int                     m_swidth;       /* stripe width */
        int                     m_sinoalign;    /* stripe unit inode alignment */
@@ -243,7 +242,6 @@ typedef struct xfs_mount {
        wait_queue_head_t       m_wait_single_sync_task;
        __int64_t               m_update_flags; /* sb flags we need to update
                                                   on the next remount,rw */
-       struct list_head        m_mplist;       /* inode shrinker mount list */
 } xfs_mount_t;
 
 /*
index 726014d1c925f9cf398492a79528839e07790000..f5e4874c37d8ecfa55a77a1a1eeca31c7a6d27df 100644 (file)
@@ -36,6 +36,13 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
                 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
 }
 
+/*
+ * Flags for xfs_free_eofblocks
+ */
+#define XFS_FREE_EOF_LOCK      (1<<0)
+#define XFS_FREE_EOF_NOLOCK    (1<<1)
+
+
 /*
  * helper function to extract extent size hint from inode
  */
index 237badcbac3bcbbc9fbbba8ecea9229cdac80f08..66b849358e62d16e9a01fc307bc5a03dc1605ffb 100644 (file)
@@ -236,20 +236,19 @@ xfs_trans_alloc(
        uint            type)
 {
        xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
-       return _xfs_trans_alloc(mp, type, KM_SLEEP);
+       return _xfs_trans_alloc(mp, type);
 }
 
 xfs_trans_t *
 _xfs_trans_alloc(
        xfs_mount_t     *mp,
-       uint            type,
-       uint            memflags)
+       uint            type)
 {
        xfs_trans_t     *tp;
 
        atomic_inc(&mp->m_active_trans);
 
-       tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
+       tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
        tp->t_magic = XFS_TRANS_MAGIC;
        tp->t_type = type;
        tp->t_mountp = mp;
index a0574f593f5272764adf820c12ad724c9d90011e..ed47fc77759c7cce24f98f283a717fae33fa9eea 100644 (file)
@@ -924,7 +924,7 @@ typedef struct xfs_trans {
  * XFS transaction mechanism exported interfaces.
  */
 xfs_trans_t    *xfs_trans_alloc(struct xfs_mount *, uint);
-xfs_trans_t    *_xfs_trans_alloc(struct xfs_mount *, uint, uint);
+xfs_trans_t    *_xfs_trans_alloc(struct xfs_mount *, uint);
 xfs_trans_t    *xfs_trans_dup(xfs_trans_t *);
 int            xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
                                  uint, uint);
index 38a63244e75dda8d89fb1e17b0009ce1c491998b..b572f7e840e0b82a372225581d87c85c104dfec6 100644 (file)
@@ -69,6 +69,7 @@ xfs_setattr(
        uint                    commit_flags=0;
        uid_t                   uid=0, iuid=0;
        gid_t                   gid=0, igid=0;
+       int                     timeflags = 0;
        struct xfs_dquot        *udqp, *gdqp, *olddquot1, *olddquot2;
        int                     need_iolock = 1;
 
@@ -133,13 +134,16 @@ xfs_setattr(
        if (flags & XFS_ATTR_NOLOCK)
                need_iolock = 0;
        if (!(mask & ATTR_SIZE)) {
-               tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
-               commit_flags = 0;
-               code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
-                                        0, 0, 0);
-               if (code) {
-                       lock_flags = 0;
-                       goto error_return;
+               if ((mask != (ATTR_CTIME|ATTR_ATIME|ATTR_MTIME)) ||
+                   (mp->m_flags & XFS_MOUNT_WSYNC)) {
+                       tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+                       commit_flags = 0;
+                       if ((code = xfs_trans_reserve(tp, 0,
+                                                    XFS_ICHANGE_LOG_RES(mp), 0,
+                                                    0, 0))) {
+                               lock_flags = 0;
+                               goto error_return;
+                       }
                }
        } else {
                if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
@@ -290,23 +294,15 @@ xfs_setattr(
                 * or we are explicitly asked to change it. This handles
                 * the semantic difference between truncate() and ftruncate()
                 * as implemented in the VFS.
-                *
-                * The regular truncate() case without ATTR_CTIME and ATTR_MTIME
-                * is a special case where we need to update the times despite
-                * not having these flags set.  For all other operations the
-                * VFS set these flags explicitly if it wants a timestamp
-                * update.
                 */
-               if (iattr->ia_size != ip->i_size &&
-                   (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
-                       iattr->ia_ctime = iattr->ia_mtime =
-                               current_fs_time(inode->i_sb);
-                       mask |= ATTR_CTIME | ATTR_MTIME;
-               }
+               if (iattr->ia_size != ip->i_size || (mask & ATTR_CTIME))
+                       timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
 
                if (iattr->ia_size > ip->i_size) {
                        ip->i_d.di_size = iattr->ia_size;
                        ip->i_size = iattr->ia_size;
+                       if (!(flags & XFS_ATTR_DMI))
+                               xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
                        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                } else if (iattr->ia_size <= ip->i_size ||
                           (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
@@ -377,6 +373,9 @@ xfs_setattr(
                        ip->i_d.di_gid = gid;
                        inode->i_gid = gid;
                }
+
+               xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+               timeflags |= XFS_ICHGTIME_CHG;
        }
 
        /*
@@ -393,37 +392,51 @@ xfs_setattr(
 
                inode->i_mode &= S_IFMT;
                inode->i_mode |= mode & ~S_IFMT;
+
+               xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+               timeflags |= XFS_ICHGTIME_CHG;
        }
 
        /*
         * Change file access or modified times.
         */
-       if (mask & ATTR_ATIME) {
-               inode->i_atime = iattr->ia_atime;
-               ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
-               ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
-               ip->i_update_core = 1;
+       if (mask & (ATTR_ATIME|ATTR_MTIME)) {
+               if (mask & ATTR_ATIME) {
+                       inode->i_atime = iattr->ia_atime;
+                       ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
+                       ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
+                       ip->i_update_core = 1;
+               }
+               if (mask & ATTR_MTIME) {
+                       inode->i_mtime = iattr->ia_mtime;
+                       ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
+                       ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
+                       timeflags &= ~XFS_ICHGTIME_MOD;
+                       timeflags |= XFS_ICHGTIME_CHG;
+               }
+               if (tp && (mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)))
+                       xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
        }
-       if (mask & ATTR_CTIME) {
+
+       /*
+        * Change file inode change time only if ATTR_CTIME set
+        * AND we have been called by a DMI function.
+        */
+
+       if ((flags & XFS_ATTR_DMI) && (mask & ATTR_CTIME)) {
                inode->i_ctime = iattr->ia_ctime;
                ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
                ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
                ip->i_update_core = 1;
-       }
-       if (mask & ATTR_MTIME) {
-               inode->i_mtime = iattr->ia_mtime;
-               ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
-               ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
-               ip->i_update_core = 1;
+               timeflags &= ~XFS_ICHGTIME_CHG;
        }
 
        /*
-        * And finally, log the inode core if any attribute in it
-        * has been changed.
+        * Send out timestamp changes that need to be set to the
+        * current time.  Not done when called by a DMI function.
         */
-       if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
-                   ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
-               xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       if (timeflags && !(flags & XFS_ATTR_DMI))
+               xfs_ichgtime(ip, timeflags);
 
        XFS_STATS_INC(xs_ig_attrchg);
 
@@ -438,10 +451,12 @@ xfs_setattr(
         * mix so this probably isn't worth the trouble to optimize.
         */
        code = 0;
-       if (mp->m_flags & XFS_MOUNT_WSYNC)
-               xfs_trans_set_sync(tp);
+       if (tp) {
+               if (mp->m_flags & XFS_MOUNT_WSYNC)
+                       xfs_trans_set_sync(tp);
 
-       code = xfs_trans_commit(tp, commit_flags);
+               code = xfs_trans_commit(tp, commit_flags);
+       }
 
        xfs_iunlock(ip, lock_flags);
 
@@ -597,7 +612,7 @@ xfs_fsync(
 {
        xfs_trans_t     *tp;
        int             error = 0;
-       int             log_flushed = 0;
+       int             log_flushed = 0, changed = 1;
 
        xfs_itrace_entry(ip);
 
@@ -627,11 +642,19 @@ xfs_fsync(
                 * disk yet, the inode will be still be pinned.  If it is,
                 * force the log.
                 */
+
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
                if (xfs_ipincount(ip)) {
                        error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
                                      XFS_LOG_FORCE | XFS_LOG_SYNC,
                                      &log_flushed);
+               } else {
+                       /*
+                        * If the inode is not pinned and nothing has changed
+                        * we don't need to flush the cache.
+                        */
+                       changed = 0;
                }
        } else  {
                /*
@@ -666,7 +689,7 @@ xfs_fsync(
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
 
-       if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
+       if ((ip->i_mount->m_flags & XFS_MOUNT_BARRIER) && changed) {
                /*
                 * If the log write didn't issue an ordered tag we need
                 * to flush the disk cache for the data device now.
@@ -685,11 +708,6 @@ xfs_fsync(
        return error;
 }
 
-/*
- * Flags for xfs_free_eofblocks
- */
-#define XFS_FREE_EOF_TRYLOCK   (1<<0)
-
 /*
  * This is called by xfs_inactive to free any blocks beyond eof
  * when the link count isn't zero and by xfs_dm_punch_hole() when
@@ -708,6 +726,7 @@ xfs_free_eofblocks(
        xfs_filblks_t   map_len;
        int             nimaps;
        xfs_bmbt_irec_t imap;
+       int             use_iolock = (flags & XFS_FREE_EOF_LOCK);
 
        /*
         * Figure out if there are any blocks beyond the end
@@ -749,19 +768,14 @@ xfs_free_eofblocks(
                 * cache and we can't
                 * do that within a transaction.
                 */
-               if (flags & XFS_FREE_EOF_TRYLOCK) {
-                       if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
-                               xfs_trans_cancel(tp, 0);
-                               return 0;
-                       }
-               } else {
+               if (use_iolock)
                        xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               }
                error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
                                    ip->i_size);
                if (error) {
                        xfs_trans_cancel(tp, 0);
-                       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+                       if (use_iolock)
+                               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return error;
                }
 
@@ -798,7 +812,8 @@ xfs_free_eofblocks(
                        error = xfs_trans_commit(tp,
                                                XFS_TRANS_RELEASE_LOG_RES);
                }
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL);
+               xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)
+                                           : XFS_ILOCK_EXCL));
        }
        return error;
 }
@@ -1098,17 +1113,7 @@ xfs_release(
                     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
                    (!(ip->i_d.di_flags &
                                (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
-
-                       /*
-                        * If we can't get the iolock just skip truncating
-                        * the blocks past EOF because we could deadlock
-                        * with the mmap_sem otherwise.  We'll get another
-                        * chance to drop them once the last reference to
-                        * the inode is dropped, so we'll never leak blocks
-                        * permanently.
-                        */
-                       error = xfs_free_eofblocks(mp, ip,
-                                                  XFS_FREE_EOF_TRYLOCK);
+                       error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
                        if (error)
                                return error;
                }
@@ -1179,7 +1184,7 @@ xfs_inactive(
                     (!(ip->i_d.di_flags &
                                (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
                      (ip->i_delayed_blks != 0)))) {
-                       error = xfs_free_eofblocks(mp, ip, 0);
+                       error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
                        if (error)
                                return VN_INACTIVE_CACHE;
                }
@@ -2451,6 +2456,46 @@ xfs_set_dmattrs(
        return error;
 }
 
+int
+xfs_reclaim(
+       xfs_inode_t     *ip)
+{
+
+       xfs_itrace_entry(ip);
+
+       ASSERT(!VN_MAPPED(VFS_I(ip)));
+
+       /* bad inode, get out here ASAP */
+       if (is_bad_inode(VFS_I(ip))) {
+               xfs_ireclaim(ip);
+               return 0;
+       }
+
+       xfs_ioend_wait(ip);
+
+       ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+
+       /*
+        * If we have nothing to flush with this inode then complete the
+        * teardown now, otherwise break the link between the xfs inode and the
+        * linux inode and clean up the xfs inode later. This avoids flushing
+        * the inode to disk during the delete operation itself.
+        *
+        * When breaking the link, we need to set the XFS_IRECLAIMABLE flag
+        * first to ensure that xfs_iunpin() will never see an xfs inode
+        * that has a linux inode being reclaimed. Synchronisation is provided
+        * by the i_flags_lock.
+        */
+       if (!ip->i_update_core && (ip->i_itemp == NULL)) {
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
+               xfs_iflock(ip);
+               xfs_iflags_set(ip, XFS_IRECLAIMABLE);
+               return xfs_reclaim_inode(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC);
+       }
+       xfs_inode_set_reclaim_tag(ip);
+       return 0;
+}
+
 /*
  * xfs_alloc_file_space()
  *      This routine allocates disk space for the given file.
index 167a467403a59f0c4327bb0a23cbb369e0e9d7af..a9e102de71a19c35d1bac05181b910d496e180fa 100644 (file)
@@ -38,6 +38,7 @@ int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
                const char *target_path, mode_t mode, struct xfs_inode **ipp,
                cred_t *credp);
 int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
+int xfs_reclaim(struct xfs_inode *ip);
 int xfs_change_file_space(struct xfs_inode *ip, int cmd,
                xfs_flock64_t *bf, xfs_off_t offset, int attr_flags);
 int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
index e7bdaafabc3f521349b297d9e62923b9b22499f2..740ac3ad8fd06fc61b8e2dacc10054ec27469439 100644 (file)
@@ -48,7 +48,7 @@ struct acpi_power_register {
        u8 space_id;
        u8 bit_width;
        u8 bit_offset;
-       u8 access_size;
+       u8 reserved;
        u64 address;
 } __attribute__ ((packed));
 
@@ -74,7 +74,6 @@ struct acpi_processor_cx {
        u32 power;
        u32 usage;
        u64 time;
-       u8 bm_sts_skip;
        struct acpi_processor_cx_policy promotion;
        struct acpi_processor_cx_policy demotion;
        char desc[ACPI_CX_DESC_LEN];
index 69206957b72c52efe1f9fe7d2479cfc931d7d7b7..e694263445f7b5dd48ff294cdd6ec4b76f3769a2 100644 (file)
@@ -131,7 +131,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
                debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
 
        } else
-               dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+               dma_sync_single_for_cpu(dev, addr, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -148,7 +148,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
                debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
 
        } else
-               dma_sync_single_for_device(dev, addr + offset, size, dir);
+               dma_sync_single_for_device(dev, addr, size, dir);
 }
 
 static inline void
index 3d016e99ee5d0d027f460c61b9671e6649a364b7..e6f3b120f51a5e4f9d51bf694b8ecba0b8d7503c 100644 (file)
@@ -6,7 +6,6 @@
        {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
@@ -85,6 +84,7 @@
        {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-       {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
        {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
        {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
-       {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
index 89387962f5f57291b289846f92d55e8203ccaf11..4fb357312b3bf4b4484879fa5ede0942e39fb15f 100644 (file)
@@ -1000,8 +1000,8 @@ static inline int ata_ok(u8 status)
 
 static inline int lba_28_ok(u64 block, u32 n_block)
 {
-       /* check the ending block number: must be LESS THAN 0x0fffffff */
-       return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
+       /* check the ending block number */
+       return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256);
 }
 
 static inline int lba_48_ok(u64 block, u32 n_block)
index 05f6018b928eb1860761e047bdecdebfecb1bc51..912b8ff3f27290d333fa8f7d4f11626817a42ea2 100644 (file)
@@ -932,7 +932,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -1083,7 +1083,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
        return q->limits.physical_block_size;
 }
 
-static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
+static inline int bdev_physical_block_size(struct block_device *bdev)
 {
        return queue_physical_block_size(bdev_get_queue(bdev));
 }
index f73bc1b68c107d9d68531de46ad916d18de070c3..64b1a4cc5a8b9757c78c56905176da9b9e1d4545 100644 (file)
@@ -282,12 +282,10 @@ extern struct clocksource * __init __weak clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
-extern void
-update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult);
+extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
 extern void update_vsyscall_tz(void);
 #else
-static inline void
-update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult)
+static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
 {
 }
 
index cab23f2da4dc35111c2d90c71a5e8630266005a2..af931ee43dd8e43454b9cd9d389ec0bd5b1a7b21 100644 (file)
@@ -309,7 +309,5 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
 asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
                                  int flags, int mode);
 
-extern void __user *compat_alloc_user_space(unsigned long len);
-
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
index a73454aec33312359c4233fa4ec7e0598dbbe345..a5740fc4d04b9415478f4180dcbdad289f5eb281 100644 (file)
@@ -21,7 +21,8 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p,
+                                      struct cpumask *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -68,6 +69,9 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
                                        struct task_struct *task);
 
+extern void cpuset_lock(void);
+extern void cpuset_unlock(void);
+
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -101,11 +105,10 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
        cpumask_copy(mask, cpu_possible_mask);
 }
-
-static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
+                                             struct cpumask *mask)
 {
-       cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
-       return cpumask_any(cpu_active_mask);
+       cpumask_copy(mask, cpu_possible_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -154,6 +157,9 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }
 
+static inline void cpuset_lock(void) {}
+static inline void cpuset_unlock(void) {}
+
 static inline int cpuset_mem_spread_node(void)
 {
        return 0;
index 3e9bd6ae10c0a8096f08e6645ebb77b42205d32b..12ff8c3f1d053f471c14e5f74ad50bf0355ebfa8 100644 (file)
 
 /* Code active when included from pre-boot environment: */
 
-/*
- * Some architectures want to ensure there is no local data in their
- * pre-boot environment, so that data can arbitarily relocated (via
- * GOT references).  This is achieved by defining STATIC_RW_DATA to
- * be null.
- */
-#ifndef STATIC_RW_DATA
-#define STATIC_RW_DATA static
-#endif
-
 /* A trivial malloc implementation, adapted from
  *  malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
  */
-STATIC_RW_DATA unsigned long malloc_ptr;
-STATIC_RW_DATA int malloc_count;
+static unsigned long malloc_ptr;
+static int malloc_count;
 
 static void *malloc(int size)
 {
index 7ffab7cb9ee3d6d1c00111ab968b25c9dd0bd778..15e4eb713694442a1ed77d5972667ac69543fd8a 100644 (file)
@@ -357,8 +357,6 @@ struct ethtool_rxnfc {
        __u32                           flow_type;
        /* The rx flow hash value or the rule DB size */
        __u64                           data;
-       /* The following fields are not valid and must not be used for
-        * the ETHTOOL_{G,X}RXFH commands. */
        struct ethtool_rx_flow_spec     fs;
        __u32                           rule_cnt;
        __u32                           rule_locs[0];
index 6181f1baafa7606b5dd06f0a995ebd6cdbd4d0aa..7f3b39b31bedb72ab53582c8c90e50082b1c8ba6 100755 (executable)
@@ -794,6 +794,8 @@ struct fb_tile_ops {
 #define FBINFO_MISC_USEREVENT          0x10000 /* event request
                                                  from userspace */
 #define FBINFO_MISC_TILEBLITTING       0x20000 /* use tile blitting */
+#define FBINFO_MISC_FIRMWARE           0x40000 /* a replaceable firmware
+                                                 inited framebuffer */
 
 /* A driver may set this flag to indicate that it does want a set_par to be
  * called every time when fbcon_switch is executed. The advantage is that with
@@ -807,8 +809,6 @@ struct fb_tile_ops {
  */
 #define FBINFO_MISC_ALWAYS_SETPAR   0x40000
 
-/* where the fb is a firmware driver, and can be replaced with a proper one */
-#define FBINFO_MISC_FIRMWARE        0x80000
 /*
  * Host and GPU endianness differ.
  */
index 83d7510b85017348ffbbf8ddd84b778f64230045..d31544628436cb717b88079f7f99bf6e9b775b68 100644 (file)
@@ -11,7 +11,6 @@
 struct firmware {
        size_t size;
        const u8 *data;
-       struct page **pages;
 };
 
 struct device;
index da7e52b099f3221cf723f008aa2ac627d276e11b..5a361f85cfec483e0a595dcb2f4c2ee632ac56d9 100644 (file)
@@ -64,12 +64,9 @@ extern bool freeze_task(struct task_struct *p, bool sig_only);
 extern void cancel_freezing(struct task_struct *p);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern int cgroup_frozen(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
-{
-       return 0;
-}
+static inline int cgroup_frozen(struct task_struct *task) { return 0; }
 #endif /* !CONFIG_CGROUP_FREEZER */
 
 /*
index 1ff096263b59f7f76bb9c6452f4c50209e9e3462..98ea200181512f969203d323369c33dde121f493 100644 (file)
@@ -87,9 +87,6 @@ struct inodes_stat_t {
  */
 #define FMODE_NOCMTIME         ((__force fmode_t)2048)
 
-/* Expect random access pattern */
-#define FMODE_RANDOM           ((__force fmode_t)4096)
-
 /*
  * The below are the various read and write types that we support. Some of
  * them include behavioral modifiers that send information down to the
@@ -145,11 +142,11 @@ struct inodes_stat_t {
  *
  */
 #define RW_MASK                1
-#define RWA_MASK               16
+#define RWA_MASK       2
 #define READ 0
 #define WRITE 1
-#define READA                  16 /* readahead - don't block if no resources */
-#define SWRITE                 17 /* for ll_rw_block(), wait for buffer lock */
+#define READA 2                /* read-ahead  - don't block if no resources */
+#define SWRITE 3       /* for ll_rw_block() - wait for buffer lock */
 #define READ_SYNC      (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
 #define READ_META      (READ | (1 << BIO_RW_META))
 #define WRITE_SYNC_PLUG        (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
@@ -1310,8 +1307,6 @@ extern int send_sigurg(struct fown_struct *fown);
 #define MNT_FORCE      0x00000001      /* Attempt to forcibily umount */
 #define MNT_DETACH     0x00000002      /* Just detach from the tree */
 #define MNT_EXPIRE     0x00000004      /* Mark for expiry */
-#define UMOUNT_NOFOLLOW        0x00000008      /* Don't follow symlink on umount */
-#define UMOUNT_UNUSED  0x80000000      /* Flag guaranteed to be unused */
 
 extern struct list_head super_blocks;
 extern spinlock_t sb_lock;
@@ -2227,7 +2222,6 @@ extern int generic_segment_checks(const struct iovec *iov,
 /* fs/block_dev.c */
 extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos);
-extern int block_fsync(struct file *filp, struct dentry *dentry, int datasync);
 
 /* fs/splice.c */
 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
@@ -2378,7 +2372,7 @@ extern const struct file_operations simple_dir_operations;
 extern const struct inode_operations simple_dir_inode_operations;
 struct tree_descr { char *name; const struct file_operations *ops; int mode; };
 struct dentry *d_alloc_name(struct dentry *, const char *);
-extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
+extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
 extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
 extern void simple_release_fs(struct vfsmount **mount, int *count);
 
index 040b6796ab4d47daab486bbed2ae7b5fea34c399..9bace4b9f4fe954717b0bf45c840c3fd794734f8 100644 (file)
@@ -162,11 +162,10 @@ struct hrtimer_clock_base {
  * @expires_next:      absolute time of the next event which was scheduled
  *                     via clock_set_next_event()
  * @hres_active:       State of high resolution mode
- * @hang_detected:     The last hrtimer interrupt detected a hang
- * @nr_events:         Total number of hrtimer interrupt events
- * @nr_retries:                Total number of hrtimer interrupt retries
- * @nr_hangs:          Total number of hrtimer interrupt hangs
- * @max_hang_time:     Maximum time spent in hrtimer_interrupt
+ * @check_clocks:      Indictator, when set evaluate time source and clock
+ *                     event devices whether high resolution mode can be
+ *                     activated.
+ * @nr_events:         Total number of timer interrupt events
  */
 struct hrtimer_cpu_base {
        spinlock_t                      lock;
@@ -174,11 +173,7 @@ struct hrtimer_cpu_base {
 #ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t                         expires_next;
        int                             hres_active;
-       int                             hang_detected;
        unsigned long                   nr_events;
-       unsigned long                   nr_retries;
-       unsigned long                   nr_hangs;
-       ktime_t                         max_hang_time;
 #endif
 };
 
index e7660728a2ed970af93269bcc07d08631bb78469..52e15e079c619894f680dfe8b62531358a7491aa 100644 (file)
@@ -1098,8 +1098,6 @@ enum ieee80211_category {
        WLAN_CATEGORY_SA_QUERY = 8,
        WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
        WLAN_CATEGORY_WMM = 17,
-       WLAN_CATEGORY_MESH_PLINK = 30,          /* Pending ANA approval */
-       WLAN_CATEGORY_MESH_PATH_SEL = 32,       /* Pending ANA approval */
        WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
        WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
 };
index 137130ba8e004c2cb72fe47848e3f85cb2482db3..5a9aae4adb444a3c8a63b5b9c5afd2c4456396de 100644 (file)
@@ -2,7 +2,6 @@
 #define _IF_TUNNEL_H_
 
 #include <linux/types.h>
-#include <asm/byteorder.h>
 
 #ifdef __KERNEL__
 #include <linux/ip.h>
index c49d6f542104a338a68d27fa8b3c2f50c58ea65e..7ca72b74eec7e4142866f44debf96f594c70037f 100644 (file)
  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
- * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- *
  */
 #define IRQF_DISABLED          0x00000020
 #define IRQF_SAMPLE_RANDOM     0x00000040
 #define IRQF_SHARED            0x00000080
 #define IRQF_PROBE_SHARED      0x00000100
-#define __IRQF_TIMER           0x00000200
+#define IRQF_TIMER             0x00000200
 #define IRQF_PERCPU            0x00000400
 #define IRQF_NOBALANCING       0x00000800
 #define IRQF_IRQPOLL           0x00001000
 #define IRQF_ONESHOT           0x00002000
-#define IRQF_NO_SUSPEND                0x00004000
-
-#define IRQF_TIMER             (__IRQF_TIMER | IRQF_NO_SUSPEND)
 
 /*
  * Bits used by threaded handlers:
index 9e5f45a8bab494c6b4ecbdf37df50e3063401f36..ae9653dbcd78dae5662a24c609703d855529d09b 100644 (file)
@@ -400,9 +400,7 @@ static inline int irq_has_action(unsigned int irq)
 
 /* Dynamic irq helper functions */
 extern void dynamic_irq_init(unsigned int irq);
-void dynamic_irq_init_keep_chip_data(unsigned int irq);
 extern void dynamic_irq_cleanup(unsigned int irq);
-void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
 
 /* Set/get chip/data for an IRQ: */
 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
index 638ce4554c76e903d632720b3d25dea3350c745b..f1011f7f3d4142bf5771007661cec4cd8a6c3315 100644 (file)
@@ -653,7 +653,6 @@ struct transaction_s
         * waiting for it to finish.
         */
        unsigned int t_synchronous_commit:1;
-       unsigned int t_flushed_data_blocks:1;
 
        /*
         * For use by the filesystem to store fs-specific data
index c728a50f8dabdc56c9e53e744237228174179925..b7bbb5ddd7aec861215887db80008b2b54543f79 100644 (file)
@@ -53,7 +53,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
  */
 struct kvm_io_bus {
        int                   dev_count;
-#define NR_IOBUS_DEVS 200
+#define NR_IOBUS_DEVS 6
        struct kvm_io_device *devs[NR_IOBUS_DEVS];
 };
 
@@ -116,11 +116,6 @@ struct kvm_memory_slot {
        int user_alloc;
 };
 
-static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
-{
-       return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
-}
-
 struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
diff --git a/include/linux/lcm.h b/include/linux/lcm.h
deleted file mode 100644 (file)
index 7bf01d7..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _LCM_H
-#define _LCM_H
-
-#include <linux/compiler.h>
-
-unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
-
-#endif /* _LCM_H */
index a0699160d19b0e0d167e2ff8635571b9f3974b22..b0f6d97a06c2cff52f40eb1b1b0c846895b29038 100644 (file)
@@ -339,7 +339,6 @@ enum {
        ATA_EHI_HOTPLUGGED      = (1 << 0),  /* could have been hotplugged */
        ATA_EHI_NO_AUTOPSY      = (1 << 2),  /* no autopsy */
        ATA_EHI_QUIET           = (1 << 3),  /* be quiet */
-       ATA_EHI_NO_RECOVERY     = (1 << 4),  /* no recovery */
 
        ATA_EHI_DID_SOFTRESET   = (1 << 16), /* already soft-reset this port */
        ATA_EHI_DID_HARDRESET   = (1 << 17), /* already soft-reset this port */
index 1bb43c1db2187b6457b49df60d0e47d954088795..397a3774e6e964a06cb4dbd99738684034e22a8b 100644 (file)
@@ -77,11 +77,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MAYSHARE    0x00000080
 
 #define VM_GROWSDOWN   0x00000100      /* general info on the segment */
-#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 #define VM_GROWSUP     0x00000200
-#else
-#define VM_GROWSUP     0x00000000
-#endif
 #define VM_PFNMAP      0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE   0x00000800      /* ETXTBSY on write attempts.. */
 
@@ -842,12 +838,6 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len);
@@ -1206,10 +1196,8 @@ unsigned long ra_submit(struct file_ra_state *ra,
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-#if VM_GROWSUP
+#ifdef CONFIG_IA64
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
-#else
-  #define expand_upwards(vma, address) do { } while (0)
 #endif
 extern int expand_stack_downwards(struct vm_area_struct *vma,
                                  unsigned long address);
index 9d12ed56bfbc6116db691245a311ae313bce2e2e..84a524afb3dcdffdd60c7ef1eaf2672acd7731ca 100644 (file)
@@ -138,7 +138,7 @@ struct vm_area_struct {
                                           within vm_mm. */
 
        /* linked list of VM areas per task, sorted by address */
-       struct vm_area_struct *vm_next, *vm_prev;
+       struct vm_area_struct *vm_next;
 
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */
index 118f0295a575c81af69aa179d588a450f881a193..47ba464f5170eb3695d11192b28ee96601ead6b8 100644 (file)
@@ -94,8 +94,6 @@
 
 #define  SDIO_BUS_WIDTH_1BIT   0x00
 #define  SDIO_BUS_WIDTH_4BIT   0x02
-#define  SDIO_BUS_ECSI         0x20    /* Enable continuous SPI interrupt */
-#define  SDIO_BUS_SCSI         0x40    /* Support continuous SPI interrupt */
 
 #define  SDIO_BUS_CD_DISABLE     0x80  /* disable pull-up on DAT3 (pin 1) */
 
index 6c31a2a7c18d72194f7d197cb20263feb3f55936..6f7561730d88c3b8c816e34e76b140cb9612ce8c 100644 (file)
@@ -289,13 +289,6 @@ struct zone {
        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];
 
-       /*
-        * When free pages are below this point, additional steps are taken
-        * when reading the number of free pages to avoid per-cpu counter
-        * drift allowing watermarks to be breached
-        */
-       unsigned long percpu_drift_mark;
-
        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
@@ -467,12 +460,6 @@ static inline int zone_is_oom_locked(const struct zone *zone)
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
-#ifdef CONFIG_SMP
-unsigned long zone_nr_free_pages(struct zone *zone);
-#else
-#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
-#endif /* CONFIG_SMP */
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
index 91b05c171854f12488e92c8fe2d23812bb1f03e5..6991ab5b24d1bbfd736df4d27315a83ee7271779 100644 (file)
@@ -14,10 +14,8 @@ struct irq_desc;
 extern void mask_msi_irq(unsigned int irq);
 extern void unmask_msi_irq(unsigned int irq);
 extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
-extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
 extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
 extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
-extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
 extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
 
 struct msi_desc {
index ec12f8c247705225e3143397d37d4f0917ad8269..812a5f3c2abe90da914e7c998b7b7d8e509b0e38 100644 (file)
@@ -1560,8 +1560,6 @@ extern void netif_carrier_on(struct net_device *dev);
 
 extern void netif_carrier_off(struct net_device *dev);
 
-extern void netif_notify_peers(struct net_device *dev);
-
 /**
  *     netif_dormant_on - mark device as dormant.
  *     @dev: network device
index b26dc51df6e281bc04a0f30fd2c334f12168199b..320569eabe3bcc88dea530ac8634d59223a80409 100644 (file)
@@ -176,7 +176,6 @@ struct nfs_server {
 #define NFS_CAP_ATIME          (1U << 11)
 #define NFS_CAP_CTIME          (1U << 12)
 #define NFS_CAP_MTIME          (1U << 13)
-#define NFS_CAP_POSIX_LOCK     (1U << 14)
 
 
 /* maximum number of slots to use */
index 5ecdb50bb3766b8cbd0c67f9eeee58e61245cd01..44428d247dbe6e3b52e9b71798eff99735ae4319 100644 (file)
@@ -201,7 +201,6 @@ static inline int notifier_to_errno(int ret)
 #define NETDEV_PRE_UP          0x000D
 #define NETDEV_BONDING_OLDTYPE  0x000E
 #define NETDEV_BONDING_NEWTYPE  0x000F
-#define NETDEV_NOTIFY_PEERS    0x0013
 
 #define SYS_DOWN       0x0001  /* Notify of system down */
 #define SYS_RESTART    SYS_DOWN
index e07d194d0aec34cd73ed54eb092fe25f9dbc13ce..2547515f7e929523ee76656c5d09857a48453cf2 100644 (file)
@@ -945,11 +945,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 }
 #endif /* CONFIG_PCI_DOMAINS */
 
-/* some architectures require additional setup to direct VGA traffic */
-typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
-                     unsigned int command_bits, bool change_bridge);
-extern void pci_register_set_vga_state(arch_set_vga_state_t func);
-
 #else /* CONFIG_PCI is not enabled */
 
 /*
index fe2f4ee6ce411d56c0e3456468896e7c166ba280..1b7f2a7939cb85abeb069637f6e4cc1fbcd360a9 100644 (file)
 #define PCI_DEVICE_ID_VLSI_82C147      0x0105
 #define PCI_DEVICE_ID_VLSI_VAS96011    0x0702
 
-/* AMD RD890 Chipset */
-#define PCI_DEVICE_ID_RD890_IOMMU      0x5a23
-
 #define PCI_VENDOR_ID_ADL              0x1005
 #define PCI_DEVICE_ID_ADL_2301         0x2301
 
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE       0x0759
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS     0x07D8
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS     0x0AA2
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA     0x0D85
 
 #define PCI_VENDOR_ID_IMS              0x10e0
 #define PCI_DEVICE_ID_IMS_TT128                0x9128
 #define PCI_DEVICE_ID_AFAVLAB_P030     0x2182
 #define PCI_SUBDEVICE_ID_AFAVLAB_P061          0x2150
 
-#define PCI_VENDOR_ID_BCM_GVC          0x14a4
 #define PCI_VENDOR_ID_BROADCOM         0x14e4
 #define PCI_DEVICE_ID_TIGON3_5752      0x1600
 #define PCI_DEVICE_ID_TIGON3_5752M     0x1601
 #define PCI_VENDOR_ID_JMICRON          0x197B
 #define PCI_DEVICE_ID_JMICRON_JMB360   0x2360
 #define PCI_DEVICE_ID_JMICRON_JMB361   0x2361
-#define PCI_DEVICE_ID_JMICRON_JMB362   0x2362
 #define PCI_DEVICE_ID_JMICRON_JMB363   0x2363
 #define PCI_DEVICE_ID_JMICRON_JMB365   0x2365
 #define PCI_DEVICE_ID_JMICRON_JMB366   0x2366
 #define PCI_DEVICE_ID_INTEL_82840_HB   0x1a21
 #define PCI_DEVICE_ID_INTEL_82845_HB   0x1a30
 #define PCI_DEVICE_ID_INTEL_IOAT       0x1a38
-#define PCI_DEVICE_ID_INTEL_CPT_SMBUS  0x1c22
-#define PCI_DEVICE_ID_INTEL_CPT_LPC1   0x1c42
-#define PCI_DEVICE_ID_INTEL_CPT_LPC2   0x1c43
 #define PCI_DEVICE_ID_INTEL_82801AA_0  0x2410
 #define PCI_DEVICE_ID_INTEL_82801AA_1  0x2411
 #define PCI_DEVICE_ID_INTEL_82801AA_3  0x2413
index 34066ffd893d733b8134da1b57915bec314afb48..7fc194aef8c23cc8bfe1ac4a98be55486a8b9d64 100644 (file)
@@ -2,25 +2,13 @@
 #define _LINUX_POISON_H
 
 /********** include/linux/list.h **********/
-
-/*
- * Architectures might want to move the poison pointer offset
- * into some well-recognized area such as 0xdead000000000000,
- * that is also not mappable by user-space exploits:
- */
-#ifdef CONFIG_ILLEGAL_POINTER_VALUE
-# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
-#else
-# define POISON_POINTER_DELTA 0
-#endif
-
 /*
  * These are non-NULL pointers that will result in page faults
  * under normal circumstances, used to verify that nobody uses
  * non-initialized list entries.
  */
-#define LIST_POISON1  ((void *) 0x00100100 + POISON_POINTER_DELTA)
-#define LIST_POISON2  ((void *) 0x00200200 + POISON_POINTER_DELTA)
+#define LIST_POISON1  ((void *) 0x00100100)
+#define LIST_POISON2  ((void *) 0x00200200)
 
 /********** include/linux/timer.h **********/
 /*
 #define POISON_FREE    0x6b    /* for use-after-free poisoning */
 #define        POISON_END      0xa5    /* end-byte of poisoning */
 
-/********** mm/hugetlb.c **********/
-/*
- * Private mappings of hugetlb pages use this poisoned value for
- * page->mapping. The core VM should not be doing anything with this mapping
- * but futex requires the existence of some page->mapping value even though it
- * is unused if PAGE_MAPPING_ANON is set.
- */
-#define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))
-
 /********** arch/$ARCH/mm/init.c **********/
 #define POISON_FREE_INITMEM    0xcc
 
index a529d86e7e73484080a32f2e9e24296aa22034ed..3ebb231536405d5c6e557f337692de98969d8c63 100644 (file)
@@ -26,10 +26,6 @@ static inline void writeout_quota_sb(struct super_block *sb, int type)
                sb->s_qcop->quota_sync(sb, type);
 }
 
-void inode_add_rsv_space(struct inode *inode, qsize_t number);
-void inode_claim_rsv_space(struct inode *inode, qsize_t number);
-void inode_sub_rsv_space(struct inode *inode, qsize_t number);
-
 int dquot_initialize(struct inode *inode, int type);
 int dquot_drop(struct inode *inode);
 struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
@@ -46,6 +42,7 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number);
 int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
 int dquot_claim_space(struct inode *inode, qsize_t number);
 void dquot_release_reserved_space(struct inode *inode, qsize_t number);
+qsize_t dquot_get_reserved_space(struct inode *inode);
 
 int dquot_free_space(struct inode *inode, qsize_t number);
 int dquot_free_inode(const struct inode *inode, qsize_t number);
@@ -202,8 +199,6 @@ static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
                if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
                        return 1;
        }
-       else
-               inode_add_rsv_space(inode, nr);
        return 0;
 }
 
@@ -226,7 +221,7 @@ static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
                if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
                        return 1;
        } else
-               inode_claim_rsv_space(inode, nr);
+               inode_add_bytes(inode, nr);
 
        mark_inode_dirty(inode);
        return 0;
@@ -240,8 +235,6 @@ void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
 {
        if (sb_any_quota_active(inode->i_sb))
                inode->i_sb->dq_op->release_rsv(inode, nr);
-       else
-               inode_sub_rsv_space(inode, nr);
 }
 
 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
index 7fa02b4af838513b9a609122db0772654cdc2f91..99928dce37ea927bde2f515df93ad9a2bab2a239 100644 (file)
@@ -70,11 +70,6 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
 void reiserfs_security_free(struct reiserfs_security_handle *sec);
 #endif
 
-static inline int reiserfs_xattrs_initialized(struct super_block *sb)
-{
-       return REISERFS_SB(sb)->priv_root != NULL;
-}
-
 #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
 static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size)
 {
index f1e914eefeab89fc246a2e266a22b7e66111717b..40fc7e62608220e9a64dd70493c3b091fa49a0b7 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <linux/time.h>
 
+struct task_struct;
+
 /*
  * Resource control/accounting header file for linux
  */
@@ -68,12 +70,6 @@ struct rlimit {
  */
 #include <asm/resource.h>
 
-#ifdef __KERNEL__
-
-struct task_struct;
-
 int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
 
-#endif /* __KERNEL__ */
-
 #endif
index e8e90f5ed668ec220ecbaf4af2541687da423277..30473ee9628b710b874f03be9d2eb745297b7088 100644 (file)
@@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void);
 
 
 extern void calc_global_load(void);
+extern u64 cpu_nr_migrations(int cpu);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -627,9 +628,6 @@ struct signal_struct {
        cputime_t utime, stime, cutime, cstime;
        cputime_t gtime;
        cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-       cputime_t prev_utime, prev_stime;
-#endif
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
        unsigned long inblock, oublock, cinblock, coublock;
@@ -866,10 +864,7 @@ static inline int sd_balance_for_mc_power(void)
        if (sched_smt_power_savings)
                return SD_POWERSAVINGS_BALANCE;
 
-       if (!sched_mc_power_savings)
-               return SD_PREFER_SIBLING;
-
-       return 0;
+       return SD_PREFER_SIBLING;
 }
 
 static inline int sd_balance_for_package_power(void)
@@ -1000,7 +995,6 @@ struct sched_domain {
        char *name;
 #endif
 
-       unsigned int span_weight;
        /*
         * Span of all CPUs in this domain.
         *
@@ -1072,8 +1066,7 @@ struct sched_domain;
 struct sched_class {
        const struct sched_class *next;
 
-       void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
-                             bool head);
+       void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
        void (*yield_task) (struct rq *rq);
 
@@ -1083,8 +1076,7 @@ struct sched_class {
        void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-       int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
-                              int sd_flag, int flags);
+       int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
        unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
                        struct rq *busiest, unsigned long max_load_move,
@@ -1096,8 +1088,7 @@ struct sched_class {
                              enum cpu_idle_type idle);
        void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
        void (*post_schedule) (struct rq *this_rq);
-       void (*task_waking) (struct rq *this_rq, struct task_struct *task);
-       void (*task_woken) (struct rq *this_rq, struct task_struct *task);
+       void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
        void (*set_cpus_allowed)(struct task_struct *p,
                                 const struct cpumask *newmask);
@@ -1108,7 +1099,7 @@ struct sched_class {
 
        void (*set_curr_task) (struct rq *rq);
        void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-       void (*task_fork) (struct task_struct *p);
+       void (*task_new) (struct rq *rq, struct task_struct *p);
 
        void (*switched_from) (struct rq *this_rq, struct task_struct *task,
                               int running);
@@ -1117,11 +1108,10 @@ struct sched_class {
        void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
                             int oldprio, int running);
 
-       unsigned int (*get_rr_interval) (struct rq *rq,
-                                        struct task_struct *task);
+       unsigned int (*get_rr_interval) (struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-       void (*moved_group) (struct task_struct *p, int on_rq);
+       void (*moved_group) (struct task_struct *p);
 #endif
 };
 
@@ -1182,6 +1172,7 @@ struct sched_entity {
        u64                     nr_failed_migrations_running;
        u64                     nr_failed_migrations_hot;
        u64                     nr_forced_migrations;
+       u64                     nr_forced2_migrations;
 
        u64                     nr_wakeups;
        u64                     nr_wakeups_sync;
@@ -1547,6 +1538,7 @@ struct task_struct {
        /* bitmask of trace recursion */
        unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
+       unsigned long stack_start;
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1731,7 +1723,6 @@ static inline void put_task_struct(struct task_struct *t)
 extern cputime_t task_utime(struct task_struct *p);
 extern cputime_t task_stime(struct task_struct *p);
 extern cputime_t task_gtime(struct task_struct *p);
-extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
 extern int task_free_register(struct notifier_block *n);
 extern int task_free_unregister(struct notifier_block *n);
@@ -1892,7 +1883,6 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
 extern void idle_task_exit(void);
 #else
 static inline void idle_task_exit(void) {}
index 3cbf483919bc95475a8d3b4a63ce16e6349798b3..3d0a9ff24f01220f2957d8f3665eb3467dcafa6d 100644 (file)
@@ -301,7 +301,6 @@ struct ssb_bus {
        /* ID information about the Chip. */
        u16 chip_id;
        u16 chip_rev;
-       u16 sprom_offset;
        u16 sprom_size;         /* number of words in sprom */
        u8 chip_package;
 
@@ -391,9 +390,6 @@ extern int ssb_bus_sdiobus_register(struct ssb_bus *bus,
 
 extern void ssb_bus_unregister(struct ssb_bus *bus);
 
-/* Does the device have an SPROM? */
-extern bool ssb_is_sprom_available(struct ssb_bus *bus);
-
 /* Set a fallback SPROM.
  * See kdoc at the function definition for complete documentation. */
 extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom);
index 2cdf249b4e5f0627980e4140bf96170cb8546ad0..4e27acf0a92f91f3f3052f96dcdfa4d142ca369e 100644 (file)
@@ -53,7 +53,6 @@
 #define  SSB_CHIPCO_CAP_64BIT          0x08000000      /* 64-bit Backplane */
 #define  SSB_CHIPCO_CAP_PMU            0x10000000      /* PMU available (rev >= 20) */
 #define  SSB_CHIPCO_CAP_ECI            0x20000000      /* ECI available (rev >= 20) */
-#define  SSB_CHIPCO_CAP_SPROM          0x40000000      /* SPROM present */
 #define SSB_CHIPCO_CORECTL             0x0008
 #define  SSB_CHIPCO_CORECTL_UARTCLK0   0x00000001      /* Drive UART with internal clock */
 #define         SSB_CHIPCO_CORECTL_SE          0x00000002      /* sync clk out enable (corerev >= 3) */
 
 
 /** Chip specific Chip-Status register contents. */
-#define SSB_CHIPCO_CHST_4322_SPROM_EXISTS      0x00000040 /* SPROM present */
 #define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL     0x00000003
 #define SSB_CHIPCO_CHST_4325_DEFCIS_SEL                0 /* OTP is powered up, use def. CIS, no SPROM */
 #define SSB_CHIPCO_CHST_4325_SPROM_SEL         1 /* OTP is powered up, SPROM is present */
 #define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT  4
 #define SSB_CHIPCO_CHST_4325_PMUTOP_2B                 0x00000200 /* 1 for 2b, 0 for to 2a */
 
-/** Macros to determine SPROM presence based on Chip-Status register. */
-#define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \
-       ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
-               SSB_CHIPCO_CHST_4325_OTP_SEL)
-#define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \
-       (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS)
-#define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \
-       (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
-               SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \
-        ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
-               SSB_CHIPCO_CHST_4325_OTP_SEL))
-
 
 
 /** Clockcontrol masks and values **/
@@ -578,7 +564,6 @@ struct ssb_chipcommon_pmu {
 struct ssb_chipcommon {
        struct ssb_device *dev;
        u32 capabilities;
-       u32 status;
        /* Fast Powerup Delay constant */
        u16 fast_pwrup_delay;
        struct ssb_chipcommon_pmu pmu;
index b8be23ce191547126ea00f54fcf7a12a6b742fad..9ae9082eaeb46eb2f9d9573a2706d339ab8c06ab 100644 (file)
 #define SSB_SPROMSIZE_WORDS_R4         220
 #define SSB_SPROMSIZE_BYTES_R123       (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16))
 #define SSB_SPROMSIZE_BYTES_R4         (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16))
-#define SSB_SPROM_BASE1                        0x1000
-#define SSB_SPROM_BASE31               0x0800
+#define SSB_SPROM_BASE                 0x1000
 #define SSB_SPROM_REVISION             0x107E
 #define  SSB_SPROM_REVISION_REV                0x00FF  /* SPROM Revision number */
 #define  SSB_SPROM_REVISION_CRC                0xFF00  /* SPROM CRC8 value */
index 977d15056fa17587e4a9c0a4f8e82fec5641bdca..4ec90019c1a4c3997c8da80dd3d36f3b31d09b11 100644 (file)
@@ -218,11 +218,21 @@ static inline void lru_cache_add_anon(struct page *page)
        __lru_cache_add(page, LRU_INACTIVE_ANON);
 }
 
+static inline void lru_cache_add_active_anon(struct page *page)
+{
+       __lru_cache_add(page, LRU_ACTIVE_ANON);
+}
+
 static inline void lru_cache_add_file(struct page *page)
 {
        __lru_cache_add(page, LRU_INACTIVE_FILE);
 }
 
+static inline void lru_cache_add_active_file(struct page *page)
+{
+       __lru_cache_add(page, LRU_ACTIVE_FILE);
+}
+
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
index 6ba163fb3684d83e8b044cb4f808dfe3c570f5af..93515c6190a7b8d01b8c0f0fc99293d5f6d9e0ee 100644 (file)
@@ -153,8 +153,7 @@ static void prof_sysexit_disable_##sname(void)                                     \
 #define __SC_STR_TDECL6(t, a, ...)     #t, __SC_STR_TDECL5(__VA_ARGS__)
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname)                               \
-       static struct ftrace_event_call                                 \
-       __attribute__((__aligned__(4))) event_enter_##sname;            \
+       static struct ftrace_event_call event_enter_##sname;            \
        struct trace_event enter_syscall_print_##sname = {              \
                .trace                  = print_syscall_enter,          \
        };                                                              \
@@ -190,8 +189,7 @@ static void prof_sysexit_disable_##sname(void)                                     \
        }
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)                                        \
-       static struct ftrace_event_call                                 \
-       __attribute__((__aligned__(4))) event_exit_##sname;             \
+       static struct ftrace_event_call event_exit_##sname;             \
        struct trace_event exit_syscall_print_##sname = {               \
                .trace                  = print_syscall_exit,           \
        };                                                              \
index 1dba6ee55203fec99992d211eb169938fcec918e..bf2a0c7488780b6096d2c5ccf93716fdea5b5b89 100644 (file)
@@ -150,7 +150,6 @@ extern int tboot_force_iommu(void);
 
 #else
 
-#define tboot_enabled()                        0
 #define tboot_probe()                  do { } while (0)
 #define tboot_shutdown(shutdown_type)  do { } while (0)
 #define tboot_sleep(sleep_state, pm1a_control, pm1b_control)   \
index 8dc082194b226a3485535fa09646e6f31ccc4558..0482229c07db348636a9089722b69168a8b4dcc9 100644 (file)
@@ -98,9 +98,6 @@ extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
 extern void tick_check_idle(int cpu);
 extern int tick_oneshot_mode_active(void);
-#  ifndef arch_needs_cpu
-#   define arch_needs_cpu(cpu) (0)
-#  endif
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
index 5b81156780b17f8bf52608ca7b8230dd638a530a..57e63579bfdd7f193511675fc9ed26141b586638 100644 (file)
@@ -99,7 +99,7 @@ int arch_update_cpu_topology(void);
                                | 1*SD_WAKE_AFFINE                      \
                                | 1*SD_SHARE_CPUPOWER                   \
                                | 0*SD_POWERSAVINGS_BALANCE             \
-                               | 1*SD_SHARE_PKG_RESOURCES              \
+                               | 0*SD_SHARE_PKG_RESOURCES              \
                                | 0*SD_SERIALIZE                        \
                                | 0*SD_PREFER_SIBLING                   \
                                ,                                       \
index e9c57e9bb7f22ccb9a24843bc120e2773550c627..f0f43d08d8b8765911d51f681034841a73b1c239 100644 (file)
@@ -68,17 +68,6 @@ struct tty_buffer {
        unsigned long data[0];
 };
 
-/*
- * We default to dicing tty buffer allocations to this many characters
- * in order to avoid multiple page allocations. We know the size of
- * tty_buffer itself but it must also be taken into account that the
- * the buffer is 256 byte aligned. See tty_buffer_find for the allocation
- * logic this must match
- */
-
-#define TTY_BUFFER_PAGE        (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
-
-
 struct tty_bufhead {
        struct delayed_work work;
        spinlock_t lock;
index dd0bde120045aab26a2ce403af9d39d8b46b4171..2526f3bbd273e522e1ff77b2ba404c87d9a5a56a 100644 (file)
@@ -19,8 +19,4 @@
 /* device can't handle its Configuration or Interface strings */
 #define USB_QUIRK_CONFIG_INTF_STRINGS  0x00000008
 
-/* device needs a pause during initialization, after we read the device
-   descriptor */
-#define USB_QUIRK_DELAY_INIT           0x00000040
-
 #endif /* __LINUX_USB_QUIRKS_H */
index 13070d659129d44cee316c25f451258973ae2d2f..2d0f222388a8e9f2dfb3f8bec716eff2999d78f2 100644 (file)
@@ -166,28 +166,6 @@ static inline unsigned long zone_page_state(struct zone *zone,
        return x;
 }
 
-/*
- * More accurate version that also considers the currently pending
- * deltas. For that we need to loop over all cpus to find the current
- * deltas. There is no synchronization so the result cannot be
- * exactly accurate either.
- */
-static inline unsigned long zone_page_state_snapshot(struct zone *zone,
-                                       enum zone_stat_item item)
-{
-       long x = atomic_long_read(&zone->vm_stat[item]);
-
-#ifdef CONFIG_SMP
-       int cpu;
-       for_each_online_cpu(cpu)
-               x += zone_pcp(zone, cpu)->vm_stat_diff[item];
-
-       if (x < 0)
-               x = 0;
-#endif
-       return x;
-}
-
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
index dc52482833a2d68fc4a6094ba684350dc671e743..66ebddcff6641f95498aa3c2e7af07cbffaa3a29 100644 (file)
@@ -69,7 +69,6 @@ struct writeback_control {
 struct bdi_writeback;
 int inode_wait(void *);
 void writeback_inodes_sb(struct super_block *);
-int writeback_inodes_sb_if_idle(struct super_block *);
 void sync_inodes_sb(struct super_block *);
 void writeback_inodes_wbc(struct writeback_control *wbc);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
index 3e2576df213c609730cb38f14e5fa534fc21c896..f456534dcaf92615e4b6778bd17de23f227a5578 100644 (file)
@@ -799,7 +799,7 @@ do {                                                                        \
                X##_e -= (_FP_W_TYPE_SIZE - rsize);                     \
        X##_e = rsize - X##_e - 1;                                      \
                                                                        \
-       if (_FP_FRACBITS_##fs < rsize && _FP_WFRACBITS_##fs <= X##_e)   \
+       if (_FP_FRACBITS_##fs < rsize && _FP_WFRACBITS_##fs < X##_e)    \
          __FP_FRAC_SRS_1(ur_, (X##_e - _FP_WFRACBITS_##fs + 1), rsize);\
        _FP_FRAC_DISASSEMBLE_##wc(X, ur_, rsize);                       \
        if ((_FP_WFRACBITS_##fs - X##_e - 1) > 0)                       \
index c39ed07929487457ae31f6d371409bde4fa430b0..998c30fc89819f2d48140dd83ad82feb40e6f99b 100644 (file)
@@ -908,9 +908,6 @@ enum ieee80211_tkip_key_type {
  * @IEEE80211_HW_BEACON_FILTER:
  *     Hardware supports dropping of irrelevant beacon frames to
  *     avoid waking up cpu.
- * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
- *     Hardware can provide ack status reports of Tx frames to
- *     the stack.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_RX_INCLUDES_FCS                    = 1<<1,
@@ -927,7 +924,6 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_SUPPORTS_DYNAMIC_PS                = 1<<12,
        IEEE80211_HW_MFP_CAPABLE                        = 1<<13,
        IEEE80211_HW_BEACON_FILTER                      = 1<<14,
-       IEEE80211_HW_REPORTS_TX_ACK_STATUS              = 1<<15,
 };
 
 /**
index 89e54e932368430bcabad6f2210a6c510428ef4e..0a474568b003053686f98e27d4102d34148a8ade 100644 (file)
@@ -772,7 +772,6 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
                          struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void  *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
-void  *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
                                 const struct sctp_association *,
                                 struct sock *);
index c3eebd06bd87d15ced4521307bba2254633bd695..b9648890b3eb76e461eb939be048571d38b797b3 100644 (file)
@@ -259,21 +259,11 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
        return seq3 - seq2 >= seq1 - seq2;
 }
 
-static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
+static inline int tcp_too_many_orphans(struct sock *sk, int num)
 {
-       struct percpu_counter *ocp = sk->sk_prot->orphan_count;
-       int orphans = percpu_counter_read_positive(ocp);
-
-       if (orphans << shift > sysctl_tcp_max_orphans) {
-               orphans = percpu_counter_sum_positive(ocp);
-               if (orphans << shift > sysctl_tcp_max_orphans)
-                       return true;
-       }
-
-       if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-           atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
-               return true;
-       return false;
+       return (num > sysctl_tcp_max_orphans) ||
+               (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+                atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
 }
 
 /* syncookies: remember time of last synqueue overflow */
@@ -511,22 +501,8 @@ extern unsigned int tcp_current_mss(struct sock *sk);
 /* Bound MSS / TSO packet size with the half of the window */
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 {
-       int cutoff;
-
-       /* When peer uses tiny windows, there is no use in packetizing
-        * to sub-MSS pieces for the sake of SWS or making sure there
-        * are enough packets in the pipe for fast recovery.
-        *
-        * On the other hand, for extremely large MSS devices, handling
-        * smaller than MSS windows in this way does make sense.
-        */
-       if (tp->max_window >= 512)
-               cutoff = (tp->max_window >> 1);
-       else
-               cutoff = tp->max_window;
-
-       if (cutoff && pktsize > cutoff)
-               return max_t(int, cutoff, 68U - tp->tcp_header_len);
+       if (tp->max_window && pktsize > (tp->max_window >> 1))
+               return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
        else
                return pktsize;
 }
index 21926a0458fc91e4908f42c64022d1bfbd3cac11..2cda0401156874213972853e7c2a1cbb19f00a06 100644 (file)
@@ -182,10 +182,6 @@ extern int  sysctl_x25_clear_request_timeout;
 extern int  sysctl_x25_ack_holdback_timeout;
 extern int  sysctl_x25_forward;
 
-extern int x25_parse_address_block(struct sk_buff *skb,
-               struct x25_address *called_addr,
-               struct x25_address *calling_addr);
-
 extern int  x25_addr_ntoa(unsigned char *, struct x25_address *,
                          struct x25_address *);
 extern int  x25_addr_aton(unsigned char *, struct x25_address *,
index 91a4e4ff9a9bbeb28d66394299e3ef3c938e5b0c..a4b233318179b16e80997723b82959a45b89df90 100644 (file)
@@ -292,7 +292,7 @@ struct fc_bsg_request {
                struct fc_bsg_rport_els         r_els;
                struct fc_bsg_rport_ct          r_ct;
        } rqst_data;
-} __attribute__((packed));
+};
 
 
 /* response (request sense data) structure of the sg_io_v4 */
index 7dc97d12253c1bdc494b939e5e856b1b1fa6fc2c..6a664c3f7c1e426dd0b74129ce9cf81cb07fefa0 100644 (file)
@@ -1707,7 +1707,6 @@ struct snd_emu10k1 {
        unsigned int card_type;                 /* EMU10K1_CARD_* */
        unsigned int ecard_ctrl;                /* ecard control bits */
        unsigned long dma_mask;                 /* PCI DMA mask */
-       unsigned int delay_pcm_irq;             /* in samples */
        int max_cache_pages;                    /* max memory size / PAGE_SIZE */
        struct snd_dma_buffer silent_page;      /* silent page */
        struct snd_dma_buffer ptb_pages;        /* page table pages */
index 4b6a4a3001677d1482caac5df7e1d6167dd025c2..dacb8ef6700071238a3293799f57610909c6f22f 100644 (file)
@@ -43,8 +43,7 @@
                tstruct                                         \
                char                    __data[0];              \
        };                                                      \
-       static struct ftrace_event_call                 \
-       __attribute__((__aligned__(4))) event_##name
+       static struct ftrace_event_call event_##name
 
 #undef __cpparg
 #define __cpparg(arg...) arg
index 6b1ad6f388f9207de36daee1f0112c857cf145d5..f7ae1992a774cd062541390038d3c2a22d1cf3d4 100644 (file)
@@ -455,8 +455,7 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
                                         compress_name);
                                message = msg_buf;
                        }
-               } else
-                       error("junk in compressed archive");
+               }
                if (state != Reset)
                        error("junk in compressed archive");
                else
index bc109c70648677829584ce702d6d32d9e7953996..4051d75dd2d64e765b5d2856eac0da7fa796ef95 100644 (file)
@@ -369,6 +369,12 @@ static void __init smp_init(void)
 {
        unsigned int cpu;
 
+       /*
+        * Set up the current CPU as possible to migrate to.
+        * The other ones will be done by cpu_up/cpu_down()
+        */
+       set_cpu_active(smp_processor_id(), true);
+
        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
@@ -480,7 +486,6 @@ static void __init boot_cpu_init(void)
        int cpu = smp_processor_id();
        /* Mark the boot cpu "present", "online" etc for SMP and UP case */
        set_cpu_online(cpu, true);
-       set_cpu_active(cpu, true);
        set_cpu_present(cpu, true);
        set_cpu_possible(cpu, true);
 }
@@ -846,7 +851,7 @@ static int __init kernel_init(void * unused)
        /*
         * init can allocate pages on any node
         */
-       set_mems_allowed(node_states[N_HIGH_MEMORY]);
+       set_mems_allowed(node_possible_map);
        /*
         * init can run on any cpu.
         */
index 5e3e3a15661bc5c97674081974d8ee51ef135445..ab76fb0ef8443e373c845b4a82510f09d64a1e79 100644 (file)
@@ -242,8 +242,6 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
        struct semid64_ds __user *up64;
        int version = compat_ipc_parse_version(&third);
 
-       memset(&s64, 0, sizeof(s64));
-
        if (!uptr)
                return -EINVAL;
        if (get_user(pad, (u32 __user *) uptr))
@@ -424,8 +422,6 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
        int version = compat_ipc_parse_version(&second);
        void __user *p;
 
-       memset(&m64, 0, sizeof(m64));
-
        switch (second & (~IPC_64)) {
        case IPC_INFO:
        case IPC_RMID:
@@ -599,8 +595,6 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
        int err, err2;
        int version = compat_ipc_parse_version(&second);
 
-       memset(&s64, 0, sizeof(s64));
-
        switch (second & (~IPC_64)) {
        case IPC_RMID:
        case SHM_LOCK:
index 380ea4fe08e7151c71c64a39eac8a9e92a2ea7ee..d8d1e9ff4e8869ba1c9ebe6300f37dec6c62e89b 100644 (file)
@@ -53,9 +53,6 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
        void __user *p = NULL;
        if (u_attr && oflag & O_CREAT) {
                struct mq_attr attr;
-
-               memset(&attr, 0, sizeof(attr));
-
                p = compat_alloc_user_space(sizeof(attr));
                if (get_compat_mq_attr(&attr, u_attr) ||
                    copy_to_user(p, &attr, sizeof(attr)))
@@ -130,8 +127,6 @@ asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
        struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
        long ret;
 
-       memset(&mqstat, 0, sizeof(mqstat));
-
        if (u_mqstat) {
                if (get_compat_mq_attr(&mqstat, u_mqstat) ||
                    copy_to_user(p, &mqstat, sizeof(mqstat)))
index d01bc14a9b3713f6f62ed6c2599a1ca95b0cf0e2..ee9d69707c0afafc9d5a6d0d9b46146e4fafe3e4 100644 (file)
@@ -706,7 +706,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
        dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
        if (IS_ERR(dentry)) {
                error = PTR_ERR(dentry);
-               goto out_putfd;
+               goto out_err;
        }
        mntget(ipc_ns->mq_mnt);
 
@@ -744,6 +744,7 @@ out:
        mntput(ipc_ns->mq_mnt);
 out_putfd:
        put_unused_fd(fd);
+out_err:
        fd = error;
 out_upsem:
        mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
index b781007eea46aa02355303c48c52c071a9931b74..2f2a47959576964c200e7493464818da982c3e7a 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -560,8 +560,6 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
            {
                struct semid_ds out;
 
-               memset(&out, 0, sizeof(out));
-
                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
 
                out.sem_otime   = in->sem_otime;
index d30732c97599d6e0a516ccfc305cef1d76bc2304..e9b039f74129e3e5e47d34c1e91a128b29fa45df 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -474,7 +474,6 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_
            {
                struct shmid_ds out;
 
-               memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz   = in->shm_segsz;
                out.shm_atime   = in->shm_atime;
index a9b6bc3fa44a9430b9cb404f1d1b3ef70aafce34..67b155f65a6c944b100a684d651b7a2b65fc3e17 100644 (file)
@@ -47,20 +47,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
                            struct freezer, css);
 }
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
+int cgroup_frozen(struct task_struct *task)
 {
        struct freezer *freezer;
        enum freezer_state state;
 
        task_lock(task);
        freezer = task_freezer(task);
-       if (!freezer->css.cgroup->parent)
-               state = CGROUP_THAWED; /* root cgroup can't be frozen */
-       else
-               state = freezer->state;
+       state = freezer->state;
        task_unlock(task);
 
-       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+       return state == CGROUP_FROZEN;
 }
 
 /*
index 8bc557869d90a9b5ebc47f6de0b5abca05aaa203..f6c204f07ea6084c4849d52358d6b1b2aab1f0da 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/posix-timers.h>
 #include <linux/times.h>
 #include <linux/ptrace.h>
-#include <linux/module.h>
 
 #include <asm/uaccess.h>
 
@@ -495,26 +494,29 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
 {
        int ret;
        cpumask_var_t mask;
+       unsigned long *k;
+       unsigned int min_length = cpumask_size();
 
-       if ((len * BITS_PER_BYTE) < nr_cpu_ids)
-               return -EINVAL;
-       if (len & (sizeof(compat_ulong_t)-1))
+       if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
+               min_length = sizeof(compat_ulong_t);
+
+       if (len < min_length)
                return -EINVAL;
 
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
 
        ret = sched_getaffinity(pid, mask);
-       if (ret == 0) {
-               size_t retlen = min_t(size_t, len, cpumask_size());
+       if (ret < 0)
+               goto out;
 
-               if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
-                       ret = -EFAULT;
-               else
-                       ret = retlen;
-       }
-       free_cpumask_var(mask);
+       k = cpumask_bits(mask);
+       ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
+       if (ret == 0)
+               ret = min_length;
 
+out:
+       free_cpumask_var(mask);
        return ret;
 }
 
@@ -1137,24 +1139,3 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
 
        return 0;
 }
-
-/*
- * Allocate user-space memory for the duration of a single system call,
- * in order to marshall parameters inside a compat thunk.
- */
-void __user *compat_alloc_user_space(unsigned long len)
-{
-       void __user *ptr;
-
-       /* If len would occupy more than half of the entire compat space... */
-       if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
-               return NULL;
-
-       ptr = arch_compat_alloc_user_space(len);
-
-       if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
-               return NULL;
-
-       return ptr;
-}
-EXPORT_SYMBOL_GPL(compat_alloc_user_space);
index 7e8b6acd2e857f722048bb646334390d7892afc0..291ac586f37ff93e047391be46b3db90d5f80445 100644 (file)
@@ -151,7 +151,7 @@ static inline void check_for_tasks(int cpu)
 
        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
-               if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
+               if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
@@ -163,7 +163,6 @@ static inline void check_for_tasks(int cpu)
 }
 
 struct take_cpu_down_param {
-       struct task_struct *caller;
        unsigned long mod;
        void *hcpu;
 };
@@ -172,7 +171,6 @@ struct take_cpu_down_param {
 static int __ref take_cpu_down(void *_param)
 {
        struct take_cpu_down_param *param = _param;
-       unsigned int cpu = (unsigned long)param->hcpu;
        int err;
 
        /* Ensure this CPU doesn't handle any more interrupts. */
@@ -183,8 +181,6 @@ static int __ref take_cpu_down(void *_param)
        raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
                                param->hcpu);
 
-       if (task_cpu(param->caller) == cpu)
-               move_task_off_dead_cpu(cpu, param->caller);
        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
@@ -195,10 +191,10 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
        int err, nr_calls = 0;
+       cpumask_var_t old_allowed;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
-               .caller = current,
                .mod = mod,
                .hcpu = hcpu,
        };
@@ -209,8 +205,10 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
        if (!cpu_online(cpu))
                return -EINVAL;
 
+       if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+               return -ENOMEM;
+
        cpu_hotplug_begin();
-       set_cpu_active(cpu, false);
        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                        hcpu, -1, &nr_calls);
        if (err == NOTIFY_BAD) {
@@ -225,6 +223,10 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
                goto out_release;
        }
 
+       /* Ensure that we are not runnable on dying cpu */
+       cpumask_copy(old_allowed, &current->cpus_allowed);
+       set_cpus_allowed_ptr(current, cpu_active_mask);
+
        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                set_cpu_active(cpu, true);
@@ -233,7 +235,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
                                            hcpu) == NOTIFY_BAD)
                        BUG();
 
-               goto out_release;
+               goto out_allowed;
        }
        BUG_ON(cpu_online(cpu));
 
@@ -251,6 +253,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
        check_for_tasks(cpu);
 
+out_allowed:
+       set_cpus_allowed_ptr(current, old_allowed);
 out_release:
        cpu_hotplug_done();
        if (!err) {
@@ -258,6 +262,7 @@ out_release:
                                            hcpu) == NOTIFY_BAD)
                        BUG();
        }
+       free_cpumask_var(old_allowed);
        return err;
 }
 
@@ -275,6 +280,18 @@ int __ref cpu_down(unsigned int cpu)
                goto out;
        }
 
+       set_cpu_active(cpu, false);
+
+       /*
+        * Make sure the all cpus did the reschedule and are not
+        * using stale version of the cpu_active_mask.
+        * This is not strictly necessary becuase stop_machine()
+        * that we run down the line already provides the required
+        * synchronization. But it's really a side effect and we do not
+        * want to depend on the innards of the stop_machine here.
+        */
+       synchronize_sched();
+
        err = _cpu_down(cpu, 0);
 
 out:
@@ -365,12 +382,19 @@ int disable_nonboot_cpus(void)
                return error;
        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
-       /*
-        * We take down all of the non-boot CPUs in one shot to avoid races
+       /* We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);
 
+       for_each_online_cpu(cpu) {
+               if (cpu == first_cpu)
+                       continue;
+               set_cpu_active(cpu, false);
+       }
+
+       synchronize_sched();
+
        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
index b97da1c5e9e3b41870e8a308fb9c15900eb9342a..5d03d305a2b1bbe98d0b3904109f1ae076282387 100644 (file)
@@ -921,6 +921,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  *    call to guarantee_online_mems(), as we know no one is changing
  *    our task's cpuset.
  *
+ *    Hold callback_mutex around the two modifications of our tasks
+ *    mems_allowed to synchronize with cpuset_mems_allowed().
+ *
  *    While the mm_struct we are migrating is typically from some
  *    other task, the task_struct mems_allowed that we are hacking
  *    is for our current task, which must allocate new pages for that
@@ -1396,10 +1399,11 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 
        if (cs == &top_cpuset) {
                cpumask_copy(cpus_attach, cpu_possible_mask);
+               to = node_possible_map;
        } else {
                guarantee_online_cpus(cs, cpus_attach);
+               guarantee_online_mems(cs, &to);
        }
-       guarantee_online_mems(cs, &to);
 
        /* do per-task migration stuff possibly for each in the threadgroup */
        cpuset_attach_task(tsk, &to, cs);
@@ -2094,23 +2098,15 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 static int cpuset_track_online_nodes(struct notifier_block *self,
                                unsigned long action, void *arg)
 {
-       nodemask_t oldmems;
-
        cgroup_lock();
        switch (action) {
        case MEM_ONLINE:
-               oldmems = top_cpuset.mems_allowed;
+       case MEM_OFFLINE:
                mutex_lock(&callback_mutex);
                top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
                mutex_unlock(&callback_mutex);
-               update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
-               break;
-       case MEM_OFFLINE:
-               /*
-                * needn't update top_cpuset.mems_allowed explicitly because
-                * scan_for_empty_cpusets() will update it.
-                */
-               scan_for_empty_cpusets(&top_cpuset);
+               if (action == MEM_OFFLINE)
+                       scan_for_empty_cpusets(&top_cpuset);
                break;
        default:
                break;
@@ -2152,52 +2148,19 @@ void __init cpuset_init_smp(void)
 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
        mutex_lock(&callback_mutex);
-       task_lock(tsk);
-       guarantee_online_cpus(task_cs(tsk), pmask);
-       task_unlock(tsk);
+       cpuset_cpus_allowed_locked(tsk, pmask);
        mutex_unlock(&callback_mutex);
 }
 
-int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+/**
+ * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
+ * Must be called with callback_mutex held.
+ **/
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
 {
-       const struct cpuset *cs;
-       int cpu;
-
-       rcu_read_lock();
-       cs = task_cs(tsk);
-       if (cs)
-               cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
-       rcu_read_unlock();
-
-       /*
-        * We own tsk->cpus_allowed, nobody can change it under us.
-        *
-        * But we used cs && cs->cpus_allowed lockless and thus can
-        * race with cgroup_attach_task() or update_cpumask() and get
-        * the wrong tsk->cpus_allowed. However, both cases imply the
-        * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
-        * which takes task_rq_lock().
-        *
-        * If we are called after it dropped the lock we must see all
-        * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
-        * set any mask even if it is not right from task_cs() pov,
-        * the pending set_cpus_allowed_ptr() will fix things.
-        */
-
-       cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
-       if (cpu >= nr_cpu_ids) {
-               /*
-                * Either tsk->cpus_allowed is wrong (see above) or it
-                * is actually empty. The latter case is only possible
-                * if we are racing with remove_tasks_in_empty_cpuset().
-                * Like above we can temporary set any mask and rely on
-                * set_cpus_allowed_ptr() as synchronization point.
-                */
-               cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
-               cpu = cpumask_any(cpu_active_mask);
-       }
-
-       return cpu;
+       task_lock(tsk);
+       guarantee_online_cpus(task_cs(tsk), pmask);
+       task_unlock(tsk);
 }
 
 void cpuset_init_current_mems_allowed(void)
@@ -2385,6 +2348,22 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
        return 0;
 }
 
+/**
+ * cpuset_lock - lock out any changes to cpuset structures
+ *
+ * The out of memory (oom) code needs to mutex_lock cpusets
+ * from being changed while it scans the tasklist looking for a
+ * task in an overlapping cpuset.  Expose callback_mutex via this
+ * cpuset_lock() routine, so the oom code can lock it, before
+ * locking the task list.  The tasklist_lock is a spinlock, so
+ * must be taken inside callback_mutex.
+ */
+
+void cpuset_lock(void)
+{
+       mutex_lock(&callback_mutex);
+}
+
 /**
  * cpuset_unlock - release lock on cpuset changes
  *
index 099f5e6fb94c919dcab366e10cbb143743fdd094..1ed8ca18790c1e937208af47bed6695f4218858b 100644 (file)
@@ -786,6 +786,8 @@ bool creds_are_invalid(const struct cred *cred)
 {
        if (cred->magic != CRED_MAGIC)
                return true;
+       if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
+               return true;
 #ifdef CONFIG_SECURITY_SELINUX
        if (selinux_is_enabled()) {
                if ((unsigned long) cred->security < PAGE_SIZE)
index 570255f541e507b416ecb51c89db8b0adf0cffe9..f7864ac2ecc1ad54c0af6b06b6f9d2da4a93f1ac 100644 (file)
@@ -110,8 +110,8 @@ static void __exit_signal(struct task_struct *tsk)
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
-               sig->utime = cputime_add(sig->utime, tsk->utime);
-               sig->stime = cputime_add(sig->stime, tsk->stime);
+               sig->utime = cputime_add(sig->utime, task_utime(tsk));
+               sig->stime = cputime_add(sig->stime, task_stime(tsk));
                sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
@@ -899,15 +899,6 @@ NORET_TYPE void do_exit(long code)
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");
 
-       /*
-        * If do_exit is called because this processes oopsed, it's possible
-        * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
-        * continuing. Amongst other possible reasons, this is to prevent
-        * mm_release()->clear_child_tid() from writing to a user-controlled
-        * kernel address.
-        */
-       set_fs(USER_DS);
-
        tracehook_report_exit(&code);
 
        validate_creds_for_do_exit(tsk);
@@ -1214,7 +1205,6 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                struct signal_struct *psig;
                struct signal_struct *sig;
                unsigned long maxrss;
-               cputime_t tgutime, tgstime;
 
                /*
                 * The resource counters for the group leader are in its
@@ -1230,23 +1220,20 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                 * need to protect the access to parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
-                *
-                * We use thread_group_times() to get times for the thread
-                * group, which consolidates times for all threads in the
-                * group including the group leader.
                 */
-               thread_group_times(p, &tgutime, &tgstime);
                spin_lock_irq(&p->real_parent->sighand->siglock);
                psig = p->real_parent->signal;
                sig = p->signal;
                psig->cutime =
                        cputime_add(psig->cutime,
-                       cputime_add(tgutime,
-                                   sig->cutime));
+                       cputime_add(p->utime,
+                       cputime_add(sig->utime,
+                                   sig->cutime)));
                psig->cstime =
                        cputime_add(psig->cstime,
-                       cputime_add(tgstime,
-                                   sig->cstime));
+                       cputime_add(p->stime,
+                       cputime_add(sig->stime,
+                                   sig->cstime)));
                psig->cgtime =
                        cputime_add(psig->cgtime,
                        cputime_add(p->gtime,
@@ -1383,7 +1370,8 @@ static int wait_task_stopped(struct wait_opts *wo,
        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;
 
-       uid = task_uid(p);
+       /* don't need the RCU readlock here as we're holding a spinlock */
+       uid = __task_cred(p)->uid;
 unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
@@ -1456,7 +1444,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
-       uid = task_uid(p);
+       uid = __task_cred(p)->uid;
        spin_unlock_irq(&p->sighand->siglock);
 
        pid = task_pid_vnr(p);
index 2ac4cbbc4f502a06e865b8f99462832f097e2a87..2cfa6dcfd44f0c015007b4c69761e83b0f1ff286 100644 (file)
@@ -293,7 +293,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+       struct vm_area_struct *mpnt, *tmp, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
@@ -321,7 +321,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
        if (retval)
                goto out;
 
-       prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;
 
@@ -350,7 +349,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                vma_set_policy(tmp, pol);
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
-               tmp->vm_next = tmp->vm_prev = NULL;
+               tmp->vm_next = NULL;
                anon_vma_link(tmp);
                file = tmp->vm_file;
                if (file) {
@@ -384,8 +383,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;
-               tmp->vm_prev = prev;
-               prev = tmp;
 
                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
@@ -903,9 +900,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
        sig->gtime = cputime_zero;
        sig->cgtime = cputime_zero;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-       sig->prev_utime = sig->prev_stime = cputime_zero;
-#endif
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
@@ -1145,6 +1139,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        p->bts = NULL;
 
+       p->stack_start = stack_start;
+
        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);
 
@@ -1249,6 +1245,21 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);
 
+       /*
+        * The task hasn't been attached yet, so its cpus_allowed mask will
+        * not be changed, nor will its assigned CPU.
+        *
+        * The cpus_allowed mask of the parent may have changed after it was
+        * copied first time - so re-copy it here, then check the child's CPU
+        * to ensure it is on a valid CPU (and if not, just force it back to
+        * parent's CPU). This avoids alot of nasty races.
+        */
+       p->cpus_allowed = current->cpus_allowed;
+       p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
+       if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
+                       !cpu_online(task_cpu(p))))
+               set_task_cpu(p, smp_processor_id());
+
        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
                p->real_parent = current->real_parent;
index 06d08e5eb2fc96d5089220abd5021e31817417af..c6a80616fcfd05362953b53d5808ff163acf57cf 100644 (file)
@@ -461,11 +461,20 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 static struct task_struct * futex_find_get_task(pid_t pid)
 {
        struct task_struct *p;
+       const struct cred *cred = current_cred(), *pcred;
 
        rcu_read_lock();
        p = find_task_by_vpid(pid);
-       if (p)
-               get_task_struct(p);
+       if (!p) {
+               p = ERR_PTR(-ESRCH);
+       } else {
+               pcred = __task_cred(p);
+               if (cred->euid != pcred->euid &&
+                   cred->euid != pcred->uid)
+                       p = ERR_PTR(-ESRCH);
+               else
+                       get_task_struct(p);
+       }
 
        rcu_read_unlock();
 
@@ -587,8 +596,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
-       if (!p)
-               return -ESRCH;
+       if (IS_ERR(p))
+               return PTR_ERR(p);
 
        /*
         * We need to look at the task state flags to figure out,
@@ -1395,6 +1404,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 {
        struct futex_hash_bucket *hb;
 
+       get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;
 
@@ -1406,6 +1416,7 @@ static inline void
 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
 {
        spin_unlock(&hb->lock);
+       drop_futex_key_refs(&q->key);
 }
 
 /**
@@ -1510,6 +1521,8 @@ static void unqueue_me_pi(struct futex_q *q)
        q->pi_state = NULL;
 
        spin_unlock(q->lock_ptr);
+
+       drop_futex_key_refs(&q->key);
 }
 
 /*
@@ -1840,10 +1853,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
        }
 
 retry:
-       /*
-        * Prepare to wait on uaddr. On success, holds hb lock and increments
-        * q.key refs.
-        */
+       /* Prepare to wait on uaddr. */
        ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
        if (ret)
                goto out;
@@ -1853,23 +1863,24 @@ retry:
 
        /* If we were woken (and unqueued), we succeeded, whatever. */
        ret = 0;
-       /* unqueue_me() drops q.key ref */
        if (!unqueue_me(&q))
-               goto out;
+               goto out_put_key;
        ret = -ETIMEDOUT;
        if (to && !to->task)
-               goto out;
+               goto out_put_key;
 
        /*
         * We expect signal_pending(current), but we might be the
         * victim of a spurious wakeup as well.
         */
-       if (!signal_pending(current))
+       if (!signal_pending(current)) {
+               put_futex_key(fshared, &q.key);
                goto retry;
+       }
 
        ret = -ERESTARTSYS;
        if (!abs_time)
-               goto out;
+               goto out_put_key;
 
        restart = &current_thread_info()->restart_block;
        restart->fn = futex_wait_restart;
@@ -1886,6 +1897,8 @@ retry:
 
        ret = -ERESTART_RESTARTBLOCK;
 
+out_put_key:
+       put_futex_key(fshared, &q.key);
 out:
        if (to) {
                hrtimer_cancel(&to->timer);
@@ -2264,10 +2277,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
        q.rt_waiter = &rt_waiter;
        q.requeue_pi_key = &key2;
 
-       /*
-        * Prepare to wait on uaddr. On success, increments q.key (key1) ref
-        * count.
-        */
+       /* Prepare to wait on uaddr. */
        ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
        if (ret)
                goto out_key2;
@@ -2285,9 +2295,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
         * In order for us to be here, we know our q.key == key2, and since
         * we took the hb->lock above, we also know that futex_requeue() has
         * completed and we no longer have to concern ourselves with a wakeup
-        * race with the atomic proxy lock acquisition by the requeue code. The
-        * futex_requeue dropped our key1 reference and incremented our key2
-        * reference count.
+        * race with the atomic proxy lock acquition by the requeue code.
         */
 
        /* Check if the requeue code acquired the second futex for us. */
index f83972b16564d00676154900f09d3c70affd773c..ef3c3f88a7a35e36d8f1fb551c56e534cea52687 100644 (file)
  * @children: child nodes
  * @all: list head for list of all nodes
  * @parent: parent node
- * @loaded_info: array of pointers to profiling data sets for loaded object
- *   files.
- * @num_loaded: number of profiling data sets for loaded object files.
- * @unloaded_info: accumulated copy of profiling data sets for unloaded
- *   object files. Used only when gcov_persist=1.
+ * @info: associated profiling data structure if not a directory
+ * @ghost: when an object file containing profiling data is unloaded we keep a
+ *         copy of the profiling data here to allow collecting coverage data
+ *         for cleanup code. Such a node is called a "ghost".
  * @dentry: main debugfs entry, either a directory or data file
  * @links: associated symbolic links
  * @name: data file basename
@@ -52,11 +51,10 @@ struct gcov_node {
        struct list_head children;
        struct list_head all;
        struct gcov_node *parent;
-       struct gcov_info **loaded_info;
-       struct gcov_info *unloaded_info;
+       struct gcov_info *info;
+       struct gcov_info *ghost;
        struct dentry *dentry;
        struct dentry **links;
-       int num_loaded;
        char name[0];
 };
 
@@ -138,37 +136,16 @@ static const struct seq_operations gcov_seq_ops = {
 };
 
 /*
- * Return a profiling data set associated with the given node. This is
- * either a data set for a loaded object file or a data set copy in case
- * all associated object files have been unloaded.
+ * Return the profiling data set for a given node. This can either be the
+ * original profiling data structure or a duplicate (also called "ghost")
+ * in case the associated object file has been unloaded.
  */
 static struct gcov_info *get_node_info(struct gcov_node *node)
 {
-       if (node->num_loaded > 0)
-               return node->loaded_info[0];
+       if (node->info)
+               return node->info;
 
-       return node->unloaded_info;
-}
-
-/*
- * Return a newly allocated profiling data set which contains the sum of
- * all profiling data associated with the given node.
- */
-static struct gcov_info *get_accumulated_info(struct gcov_node *node)
-{
-       struct gcov_info *info;
-       int i = 0;
-
-       if (node->unloaded_info)
-               info = gcov_info_dup(node->unloaded_info);
-       else
-               info = gcov_info_dup(node->loaded_info[i++]);
-       if (!info)
-               return NULL;
-       for (; i < node->num_loaded; i++)
-               gcov_info_add(info, node->loaded_info[i]);
-
-       return info;
+       return node->ghost;
 }
 
 /*
@@ -186,10 +163,9 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
        mutex_lock(&node_lock);
        /*
         * Read from a profiling data copy to minimize reference tracking
-        * complexity and concurrent access and to keep accumulating multiple
-        * profiling data sets associated with one node simple.
+        * complexity and concurrent access.
         */
-       info = get_accumulated_info(node);
+       info = gcov_info_dup(get_node_info(node));
        if (!info)
                goto out_unlock;
        iter = gcov_iter_new(info);
@@ -249,25 +225,12 @@ static struct gcov_node *get_node_by_name(const char *name)
        return NULL;
 }
 
-/*
- * Reset all profiling data associated with the specified node.
- */
-static void reset_node(struct gcov_node *node)
-{
-       int i;
-
-       if (node->unloaded_info)
-               gcov_info_reset(node->unloaded_info);
-       for (i = 0; i < node->num_loaded; i++)
-               gcov_info_reset(node->loaded_info[i]);
-}
-
 static void remove_node(struct gcov_node *node);
 
 /*
  * write() implementation for gcov data files. Reset profiling data for the
- * corresponding file. If all associated object files have been unloaded,
- * remove the debug fs node as well.
+ * associated file. If the object file has been unloaded (i.e. this is
+ * a "ghost" node), remove the debug fs node as well.
  */
 static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
                              size_t len, loff_t *pos)
@@ -282,10 +245,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
        node = get_node_by_name(info->filename);
        if (node) {
                /* Reset counts or remove node for unloaded modules. */
-               if (node->num_loaded == 0)
+               if (node->ghost)
                        remove_node(node);
                else
-                       reset_node(node);
+                       gcov_info_reset(node->info);
        }
        /* Reset counts for open file. */
        gcov_info_reset(info);
@@ -415,10 +378,7 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
        INIT_LIST_HEAD(&node->list);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->all);
-       if (node->loaded_info) {
-               node->loaded_info[0] = info;
-               node->num_loaded = 1;
-       }
+       node->info = info;
        node->parent = parent;
        if (name)
                strcpy(node->name, name);
@@ -434,13 +394,9 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        struct gcov_node *node;
 
        node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
-       if (!node)
-               goto err_nomem;
-       if (info) {
-               node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
-                                          GFP_KERNEL);
-               if (!node->loaded_info)
-                       goto err_nomem;
+       if (!node) {
+               pr_warning("out of memory\n");
+               return NULL;
        }
        init_node(node, info, name, parent);
        /* Differentiate between gcov data file nodes and directory nodes. */
@@ -460,11 +416,6 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        list_add(&node->all, &all_head);
 
        return node;
-
-err_nomem:
-       kfree(node);
-       pr_warning("out of memory\n");
-       return NULL;
 }
 
 /* Remove symbolic links associated with node. */
@@ -490,9 +441,8 @@ static void release_node(struct gcov_node *node)
        list_del(&node->all);
        debugfs_remove(node->dentry);
        remove_links(node);
-       kfree(node->loaded_info);
-       if (node->unloaded_info)
-               gcov_info_free(node->unloaded_info);
+       if (node->ghost)
+               gcov_info_free(node->ghost);
        kfree(node);
 }
 
@@ -527,7 +477,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
 
 /*
  * write() implementation for reset file. Reset all profiling data to zero
- * and remove nodes for which all associated object files are unloaded.
+ * and remove ghost nodes.
  */
 static ssize_t reset_write(struct file *file, const char __user *addr,
                           size_t len, loff_t *pos)
@@ -537,8 +487,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
        mutex_lock(&node_lock);
 restart:
        list_for_each_entry(node, &all_head, all) {
-               if (node->num_loaded > 0)
-                       reset_node(node);
+               if (node->info)
+                       gcov_info_reset(node->info);
                else if (list_empty(&node->children)) {
                        remove_node(node);
                        /* Several nodes may have gone - restart loop. */
@@ -614,115 +564,37 @@ err_remove:
 }
 
 /*
- * Associate a profiling data set with an existing node. Needs to be called
- * with node_lock held.
+ * The profiling data set associated with this node is being unloaded. Store a
+ * copy of the profiling data and turn this node into a "ghost".
  */
-static void add_info(struct gcov_node *node, struct gcov_info *info)
+static int ghost_node(struct gcov_node *node)
 {
-       struct gcov_info **loaded_info;
-       int num = node->num_loaded;
-
-       /*
-        * Prepare new array. This is done first to simplify cleanup in
-        * case the new data set is incompatible, the node only contains
-        * unloaded data sets and there's not enough memory for the array.
-        */
-       loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
-       if (!loaded_info) {
-               pr_warning("could not add '%s' (out of memory)\n",
-                          info->filename);
-               return;
-       }
-       memcpy(loaded_info, node->loaded_info,
-              num * sizeof(struct gcov_info *));
-       loaded_info[num] = info;
-       /* Check if the new data set is compatible. */
-       if (num == 0) {
-               /*
-                * A module was unloaded, modified and reloaded. The new
-                * data set replaces the copy of the last one.
-                */
-               if (!gcov_info_is_compatible(node->unloaded_info, info)) {
-                       pr_warning("discarding saved data for %s "
-                                  "(incompatible version)\n", info->filename);
-                       gcov_info_free(node->unloaded_info);
-                       node->unloaded_info = NULL;
-               }
-       } else {
-               /*
-                * Two different versions of the same object file are loaded.
-                * The initial one takes precedence.
-                */
-               if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
-                       pr_warning("could not add '%s' (incompatible "
-                                  "version)\n", info->filename);
-                       kfree(loaded_info);
-                       return;
-               }
+       node->ghost = gcov_info_dup(node->info);
+       if (!node->ghost) {
+               pr_warning("could not save data for '%s' (out of memory)\n",
+                          node->info->filename);
+               return -ENOMEM;
        }
-       /* Overwrite previous array. */
-       kfree(node->loaded_info);
-       node->loaded_info = loaded_info;
-       node->num_loaded = num + 1;
-}
+       node->info = NULL;
 
-/*
- * Return the index of a profiling data set associated with a node.
- */
-static int get_info_index(struct gcov_node *node, struct gcov_info *info)
-{
-       int i;
-
-       for (i = 0; i < node->num_loaded; i++) {
-               if (node->loaded_info[i] == info)
-                       return i;
-       }
-       return -ENOENT;
+       return 0;
 }
 
 /*
- * Save the data of a profiling data set which is being unloaded.
+ * Profiling data for this node has been loaded again. Add profiling data
+ * from previous instantiation and turn this node into a regular node.
  */
-static void save_info(struct gcov_node *node, struct gcov_info *info)
+static void revive_node(struct gcov_node *node, struct gcov_info *info)
 {
-       if (node->unloaded_info)
-               gcov_info_add(node->unloaded_info, info);
+       if (gcov_info_is_compatible(node->ghost, info))
+               gcov_info_add(info, node->ghost);
        else {
-               node->unloaded_info = gcov_info_dup(info);
-               if (!node->unloaded_info) {
-                       pr_warning("could not save data for '%s' "
-                                  "(out of memory)\n", info->filename);
-               }
-       }
-}
-
-/*
- * Disassociate a profiling data set from a node. Needs to be called with
- * node_lock held.
- */
-static void remove_info(struct gcov_node *node, struct gcov_info *info)
-{
-       int i;
-
-       i = get_info_index(node, info);
-       if (i < 0) {
-               pr_warning("could not remove '%s' (not found)\n",
+               pr_warning("discarding saved data for '%s' (version changed)\n",
                           info->filename);
-               return;
        }
-       if (gcov_persist)
-               save_info(node, info);
-       /* Shrink array. */
-       node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
-       node->num_loaded--;
-       if (node->num_loaded > 0)
-               return;
-       /* Last loaded data set was removed. */
-       kfree(node->loaded_info);
-       node->loaded_info = NULL;
-       node->num_loaded = 0;
-       if (!node->unloaded_info)
-               remove_node(node);
+       gcov_info_free(node->ghost);
+       node->ghost = NULL;
+       node->info = info;
 }
 
 /*
@@ -737,18 +609,30 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
        node = get_node_by_name(info->filename);
        switch (action) {
        case GCOV_ADD:
-               if (node)
-                       add_info(node, info);
-               else
+               /* Add new node or revive ghost. */
+               if (!node) {
                        add_node(info);
+                       break;
+               }
+               if (gcov_persist)
+                       revive_node(node, info);
+               else {
+                       pr_warning("could not add '%s' (already exists)\n",
+                                  info->filename);
+               }
                break;
        case GCOV_REMOVE:
-               if (node)
-                       remove_info(node, info);
-               else {
+               /* Remove node or turn into ghost. */
+               if (!node) {
                        pr_warning("could not remove '%s' (not found)\n",
                                   info->filename);
+                       break;
                }
+               if (gcov_persist) {
+                       if (!ghost_node(node))
+                               break;
+               }
+               remove_node(node);
                break;
        }
        mutex_unlock(&node_lock);
index f0c2528f56fe4770f316a6450da05237c2915601..2b45b2ee3964f47b6470a791d366842e8437048e 100644 (file)
@@ -143,9 +143,10 @@ int groups_search(const struct group_info *group_info, gid_t grp)
        right = group_info->ngroups;
        while (left < right) {
                unsigned int mid = (left+right)/2;
-               if (grp > GROUP_AT(group_info, mid))
+               int cmp = grp - GROUP_AT(group_info, mid);
+               if (cmp > 0)
                        left = mid + 1;
-               else if (grp < GROUP_AT(group_info, mid))
+               else if (cmp < 0)
                        right = mid;
                else
                        return 1;
index a6e9d00a8323ffc22c0a1b1397cfb0e02c48907a..3e1c36e7998fdbeffa17142bade5c358c61a2857 100644 (file)
@@ -557,7 +557,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 static int hrtimer_reprogram(struct hrtimer *timer,
                             struct hrtimer_clock_base *base)
 {
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
        ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
        int res;
 
@@ -582,16 +582,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
        if (expires.tv64 < 0)
                return -ETIME;
 
-       if (expires.tv64 >= cpu_base->expires_next.tv64)
-               return 0;
-
-       /*
-        * If a hang was detected in the last timer interrupt then we
-        * do not schedule a timer which is earlier than the expiry
-        * which we enforced in the hang detection. We want the system
-        * to make progress.
-        */
-       if (cpu_base->hang_detected)
+       if (expires.tv64 >= expires_next->tv64)
                return 0;
 
        /*
@@ -599,7 +590,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
         */
        res = tick_program_event(expires, 0);
        if (!IS_ERR_VALUE(res))
-               cpu_base->expires_next = expires;
+               *expires_next = expires;
        return res;
 }
 
@@ -920,7 +911,6 @@ static inline int
 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 {
        if (hrtimer_is_queued(timer)) {
-               unsigned long state;
                int reprogram;
 
                /*
@@ -934,13 +924,8 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
                debug_deactivate(timer);
                timer_stats_hrtimer_clear_start_info(timer);
                reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
-               /*
-                * We must preserve the CALLBACK state flag here,
-                * otherwise we could move the timer base in
-                * switch_hrtimer_base.
-                */
-               state = timer->state & HRTIMER_STATE_CALLBACK;
-               __remove_hrtimer(timer, base, state, reprogram);
+               __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
+                                reprogram);
                return 1;
        }
        return 0;
@@ -1227,14 +1212,34 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
                BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
                enqueue_hrtimer(timer, base);
        }
-
-       WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
-
        timer->state &= ~HRTIMER_STATE_CALLBACK;
 }
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
+static int force_clock_reprogram;
+
+/*
+ * After 5 iteration's attempts, we consider that hrtimer_interrupt()
+ * is hanging, which could happen with something that slows the interrupt
+ * such as the tracing. Then we force the clock reprogramming for each future
+ * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
+ * threshold that we will overwrite.
+ * The next tick event will be scheduled to 3 times we currently spend on
+ * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
+ * 1/4 of their time to process the hrtimer interrupts. This is enough to
+ * let it running without serious starvation.
+ */
+
+static inline void
+hrtimer_interrupt_hanging(struct clock_event_device *dev,
+                       ktime_t try_time)
+{
+       force_clock_reprogram = 1;
+       dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+       printk(KERN_WARNING "hrtimer: interrupt too slow, "
+               "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1243,15 +1248,21 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 {
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base;
-       ktime_t expires_next, now, entry_time, delta;
-       int i, retries = 0;
+       ktime_t expires_next, now;
+       int nr_retries = 0;
+       int i;
 
        BUG_ON(!cpu_base->hres_active);
        cpu_base->nr_events++;
        dev->next_event.tv64 = KTIME_MAX;
 
-       entry_time = now = ktime_get();
-retry:
+ retry:
+       /* 5 retries is enough to notice a hang */
+       if (!(++nr_retries % 5))
+               hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+
+       now = ktime_get();
+
        expires_next.tv64 = KTIME_MAX;
 
        spin_lock(&cpu_base->lock);
@@ -1313,48 +1324,10 @@ retry:
        spin_unlock(&cpu_base->lock);
 
        /* Reprogramming necessary ? */
-       if (expires_next.tv64 == KTIME_MAX ||
-           !tick_program_event(expires_next, 0)) {
-               cpu_base->hang_detected = 0;
-               return;
+       if (expires_next.tv64 != KTIME_MAX) {
+               if (tick_program_event(expires_next, force_clock_reprogram))
+                       goto retry;
        }
-
-       /*
-        * The next timer was already expired due to:
-        * - tracing
-        * - long lasting callbacks
-        * - being scheduled away when running in a VM
-        *
-        * We need to prevent that we loop forever in the hrtimer
-        * interrupt routine. We give it 3 attempts to avoid
-        * overreacting on some spurious event.
-        */
-       now = ktime_get();
-       cpu_base->nr_retries++;
-       if (++retries < 3)
-               goto retry;
-       /*
-        * Give the system a chance to do something else than looping
-        * here. We stored the entry time, so we know exactly how long
-        * we spent here. We schedule the next event this amount of
-        * time away.
-        */
-       cpu_base->nr_hangs++;
-       cpu_base->hang_detected = 1;
-       delta = ktime_sub(now, entry_time);
-       if (delta.tv64 > cpu_base->max_hang_time.tv64)
-               cpu_base->max_hang_time = delta;
-       /*
-        * Limit it to a sensible value as we enforce a longer
-        * delay. Give the CPU at least 100ms to catch up.
-        */
-       if (delta.tv64 > 100 * NSEC_PER_MSEC)
-               expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
-       else
-               expires_next = ktime_add(now, delta);
-       tick_program_event(expires_next, 1);
-       printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
-                   ktime_to_ns(delta));
 }
 
 /*
index e570d19ede7572c8b488c4bdbc4c6f3569a70a30..c1660194d1153a4b55adda2263b752547daa8563 100644 (file)
 
 #include "internals.h"
 
-static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
+/**
+ *     dynamic_irq_init - initialize a dynamically allocated irq
+ *     @irq:   irq number to initialize
+ */
+void dynamic_irq_init(unsigned int irq)
 {
        struct irq_desc *desc;
        unsigned long flags;
@@ -37,8 +41,7 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
        desc->depth = 1;
        desc->msi_desc = NULL;
        desc->handler_data = NULL;
-       if (!keep_chip_data)
-               desc->chip_data = NULL;
+       desc->chip_data = NULL;
        desc->action = NULL;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
@@ -52,26 +55,10 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
 }
 
 /**
- *     dynamic_irq_init - initialize a dynamically allocated irq
- *     @irq:   irq number to initialize
- */
-void dynamic_irq_init(unsigned int irq)
-{
-       dynamic_irq_init_x(irq, false);
-}
-
-/**
- *     dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
+ *     dynamic_irq_cleanup - cleanup a dynamically allocated irq
  *     @irq:   irq number to initialize
- *
- *     does not set irq_to_desc(irq)->chip_data to NULL
  */
-void dynamic_irq_init_keep_chip_data(unsigned int irq)
-{
-       dynamic_irq_init_x(irq, true);
-}
-
-static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
+void dynamic_irq_cleanup(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
@@ -90,8 +77,7 @@ static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
        }
        desc->msi_desc = NULL;
        desc->handler_data = NULL;
-       if (!keep_chip_data)
-               desc->chip_data = NULL;
+       desc->chip_data = NULL;
        desc->handle_irq = handle_bad_irq;
        desc->chip = &no_irq_chip;
        desc->name = NULL;
@@ -99,26 +85,6 @@ static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
        spin_unlock_irqrestore(&desc->lock, flags);
 }
 
-/**
- *     dynamic_irq_cleanup - cleanup a dynamically allocated irq
- *     @irq:   irq number to initialize
- */
-void dynamic_irq_cleanup(unsigned int irq)
-{
-       dynamic_irq_cleanup_x(irq, false);
-}
-
-/**
- *     dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
- *     @irq:   irq number to initialize
- *
- *     does not set irq_to_desc(irq)->chip_data to NULL
- */
-void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
-{
-       dynamic_irq_cleanup_x(irq, true);
-}
-
 
 /**
  *     set_irq_chip - set the irq chip for an irq
index f34e23178f54ce4d6db2e5c47d4376867ec4eaef..bde4c667d24dd067597f8451ce2fecc12bca17fa 100644 (file)
@@ -200,7 +200,7 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 {
        if (suspend) {
-               if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
+               if (!desc->action || (desc->action->flags & IRQF_TIMER))
                        return;
                desc->status |= IRQ_SUSPENDED;
        }
@@ -436,9 +436,6 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
                desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
                desc->status |= flags;
-
-               if (chip != desc->chip)
-                       irq_chip_set_defaults(desc->chip);
        }
 
        return ret;
@@ -738,16 +735,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                if (new->flags & IRQF_ONESHOT)
                        desc->status |= IRQ_ONESHOT;
 
-               /*
-                * Force MSI interrupts to run with interrupts
-                * disabled. The multi vector cards can cause stack
-                * overflows due to nested interrupts when enough of
-                * them are directed to a core and fire at the same
-                * time.
-                */
-               if (desc->msi_desc)
-                       new->flags |= IRQF_DISABLED;
-
                if (!(desc->status & IRQ_NOAUTOEN)) {
                        desc->depth = 0;
                        desc->status &= ~IRQ_DISABLED;
index 84027cfffccf05b412fe8261351e7dee28dc23a2..ab7ae57773e1b41f263407eb4a3bad1c95b41289 100644 (file)
@@ -196,7 +196,7 @@ int kthreadd(void *unused)
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
-       set_mems_allowed(node_states[N_HIGH_MEMORY]);
+       set_mems_allowed(node_possible_map);
 
        current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
 
index e99e7cd9cf4784ee8289c69584e17fcc287159b5..ca07c5c0c914186de89fb26e7323193375051178 100644 (file)
@@ -195,7 +195,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 
        account_global_scheduler_latency(tsk, &lat);
 
-       for (i = 0; i < tsk->latency_record_count; i++) {
+       /*
+        * short term hack; if we're > 32 we stop; future we recycle:
+        */
+       tsk->latency_record_count++;
+       if (tsk->latency_record_count >= LT_SAVECOUNT)
+               goto out_unlock;
+
+       for (i = 0; i < LT_SAVECOUNT; i++) {
                struct latency_record *mylat;
                int same = 1;
 
@@ -221,14 +228,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
                }
        }
 
-       /*
-        * short term hack; if we're > 32 we stop; future we recycle:
-        */
-       if (tsk->latency_record_count >= LT_SAVECOUNT)
-               goto out_unlock;
-
        /* Allocated a new one: */
-       i = tsk->latency_record_count++;
+       i = tsk->latency_record_count;
        memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
 
 out_unlock:
index eb1b9a77ebfbd13ca7991def75bd16611e334499..b5b48fea1b5bf4004322963ba67a3edf94ad33c5 100644 (file)
@@ -870,6 +870,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
        mutex_lock(&module_mutex);
        /* Store the name of the last unloaded module for diagnostic purposes */
        strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
+       ddebug_remove_module(mod->name);
        free_module(mod);
 
  out:
@@ -1532,9 +1533,6 @@ static void free_module(struct module *mod)
        remove_sect_attrs(mod);
        mod_kobject_remove(mod);
 
-       /* Remove dynamic debug info */
-       ddebug_remove_module(mod->name);
-
        /* Arch-specific cleanup. */
        module_arch_cleanup(mod);
 
index f85644c878dd66b70101de3d504ddfdf6e46d32f..947b3ad551f8a925c39ef6f53f4f37c16f8a3ea3 100644 (file)
@@ -171,13 +171,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        for (;;) {
                struct thread_info *owner;
 
-               /*
-                * If we own the BKL, then don't spin. The owner of
-                * the mutex might be waiting on us to release the BKL.
-                */
-               if (unlikely(current->lock_depth >= 0))
-                       break;
-
                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
index 183d437f4a0fb7c5ad96ad82ae3f6d28333e52e1..413d101ca8eea782725305eedbaa397a36d814d5 100644 (file)
@@ -4510,8 +4510,8 @@ SYSCALL_DEFINE5(perf_event_open,
        struct perf_event_context *ctx;
        struct file *event_file = NULL;
        struct file *group_file = NULL;
-       int event_fd;
        int fput_needed = 0;
+       int fput_needed2 = 0;
        int err;
 
        /* for future expandability... */
@@ -4532,18 +4532,12 @@ SYSCALL_DEFINE5(perf_event_open,
                        return -EINVAL;
        }
 
-       event_fd = get_unused_fd_flags(O_RDWR);
-       if (event_fd < 0)
-               return event_fd;
-
        /*
         * Get the target context (task or percpu):
         */
        ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err_fd;
-       }
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
 
        /*
         * Look up the group leader (we will attach this event to it):
@@ -4583,11 +4577,13 @@ SYSCALL_DEFINE5(perf_event_open,
        if (IS_ERR(event))
                goto err_put_context;
 
-       event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
-       if (IS_ERR(event_file)) {
-               err = PTR_ERR(event_file);
+       err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
+       if (err < 0)
+               goto err_free_put_context;
+
+       event_file = fget_light(err, &fput_needed2);
+       if (!event_file)
                goto err_free_put_context;
-       }
 
        if (flags & PERF_FLAG_FD_OUTPUT) {
                err = perf_event_set_output(event, group_fd);
@@ -4608,19 +4604,19 @@ SYSCALL_DEFINE5(perf_event_open,
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);
 
-       fput_light(group_file, fput_needed);
-       fd_install(event_fd, event_file);
-       return event_fd;
-
 err_fput_free_put_context:
-       fput(event_file);
+       fput_light(event_file, fput_needed2);
+
 err_free_put_context:
-       free_event(event);
+       if (err < 0)
+               kfree(event);
+
 err_put_context:
+       if (err < 0)
+               put_ctx(ctx);
+
        fput_light(group_file, fput_needed);
-       put_ctx(ctx);
-err_fd:
-       put_unused_fd(event_fd);
+
        return err;
 }
 
@@ -4985,22 +4981,12 @@ int perf_event_init_task(struct task_struct *child)
        return ret;
 }
 
-static void __init perf_event_init_all_cpus(void)
-{
-       int cpu;
-       struct perf_cpu_context *cpuctx;
-
-       for_each_possible_cpu(cpu) {
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               __perf_event_init_context(&cpuctx->ctx, NULL);
-       }
-}
-
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
        struct perf_cpu_context *cpuctx;
 
        cpuctx = &per_cpu(perf_cpu_context, cpu);
+       __perf_event_init_context(&cpuctx->ctx, NULL);
 
        spin_lock(&perf_resource_lock);
        cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
@@ -5071,7 +5057,6 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 
 void __init perf_event_init(void)
 {
-       perf_event_init_all_cpus();
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
index 5e76d22e7024d8c228a4a2f6619921425a3c19a6..495440779ce3b91c9927b98c61a83922b8578a31 100644 (file)
@@ -559,7 +559,14 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
        new_timer->it_id = (timer_t) new_timer_id;
        new_timer->it_clock = which_clock;
        new_timer->it_overrun = -1;
+       error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
+       if (error)
+               goto out;
 
+       /*
+        * return the timer_id now.  The next step is hard to
+        * back out if there is an error.
+        */
        if (copy_to_user(created_timer_id,
                         &new_timer_id, sizeof (new_timer_id))) {
                error = -EFAULT;
@@ -590,10 +597,6 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
        new_timer->sigq->info.si_tid   = new_timer->it_id;
        new_timer->sigq->info.si_code  = SI_TIMER;
 
-       error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
-       if (error)
-               goto out;
-
        spin_lock_irq(&current->sighand->siglock);
        new_timer->it_signal = current->signal;
        list_add(&new_timer->list, &current->signal->posix_timers);
index 97d0af323b603c709ca3c1245e0a9477bca9f0c8..c713cdabd2395c3525066a02bd56a62eeb3540be 100644 (file)
@@ -150,7 +150,7 @@ static void thaw_tasks(bool nosig_only)
                if (nosig_only && should_send_signal(p))
                        continue;
 
-               if (cgroup_freezing_or_frozen(p))
+               if (cgroup_frozen(p))
                        continue;
 
                thaw_process(p);
index fc9ed15dcf03039760f92fa3150fd5c2162c1fd1..36cb168e4330936a9120691fd19e80ddb8d2db9d 100644 (file)
@@ -1181,7 +1181,7 @@ static void free_unnecessary_pages(void)
 
        memory_bm_position_reset(&copy_bm);
 
-       while (to_free_normal > 0 || to_free_highmem > 0) {
+       while (to_free_normal > 0 && to_free_highmem > 0) {
                unsigned long pfn = memory_bm_next_pfn(&copy_bm);
                struct page *page = pfn_to_page(pfn);
 
index dfadc5b729f194905ded52faef8b2d2564dfd2ce..a55d3a367ae86a95e3788e6162717188bb05ad90 100644 (file)
@@ -127,10 +127,8 @@ int __ref profile_init(void)
                return 0;
 
        prof_buffer = vmalloc(buffer_bytes);
-       if (prof_buffer) {
-               memset(prof_buffer, 0, buffer_bytes);
+       if (prof_buffer)
                return 0;
-       }
 
        free_cpumask_var(prof_cpu_mask);
        return -ENOMEM;
index e28e6c520bca8c0560a909bd544d843306456157..f297f4c2be5074b457b609130ece529bf0eb5fe5 100644 (file)
@@ -542,6 +542,7 @@ struct rq {
        struct load_weight load;
        unsigned long nr_load_updates;
        u64 nr_switches;
+       u64 nr_migrations_in;
 
        struct cfs_rq cfs;
        struct rt_rq rt;
@@ -741,7 +742,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
 {
        char buf[64];
-       char *cmp;
+       char *cmp = buf;
        int neg = 0;
        int i;
 
@@ -752,7 +753,6 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
                return -EFAULT;
 
        buf[cnt] = 0;
-       cmp = strstrip(buf);
 
        if (strncmp(buf, "NO_", 3) == 0) {
                neg = 1;
@@ -760,7 +760,9 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        }
 
        for (i = 0; sched_feat_names[i]; i++) {
-               if (strcmp(cmp, sched_feat_names[i]) == 0) {
+               int len = strlen(sched_feat_names[i]);
+
+               if (strncmp(cmp, sched_feat_names[i], len) == 0) {
                        if (neg)
                                sysctl_sched_features &= ~(1UL << i);
                        else
@@ -940,15 +942,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
-/*
- * Check whether the task is waking, we use this to synchronize ->cpus_allowed
- * against ttwu().
- */
-static inline int task_is_waking(struct task_struct *p)
-{
-       return unlikely(p->state == TASK_WAKING);
-}
-
 /*
  * __task_rq_lock - lock the runqueue a given task resides on.
  * Must be called interrupts disabled.
@@ -956,10 +949,8 @@ static inline int task_is_waking(struct task_struct *p)
 static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
 {
-       struct rq *rq;
-
        for (;;) {
-               rq = task_rq(p);
+               struct rq *rq = task_rq(p);
                spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
@@ -1270,12 +1261,6 @@ static void sched_avg_update(struct rq *rq)
        s64 period = sched_avg_period();
 
        while ((s64)(rq->clock - rq->age_stamp) > period) {
-               /*
-                * Inline assembly required to prevent the compiler
-                * optimising this loop into a divmod call.
-                * See __iter_div_u64_rem() for another example of this.
-                */
-               asm("" : "+rm" (rq->age_stamp));
                rq->age_stamp += period;
                rq->rt_avg /= 2;
        }
@@ -1632,7 +1617,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-       unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
+       unsigned long weight, rq_weight = 0, shares = 0;
        unsigned long *usd_rq_weight;
        struct sched_domain *sd = data;
        unsigned long flags;
@@ -1648,7 +1633,6 @@ static int tg_shares_up(struct task_group *tg, void *data)
                weight = tg->cfs_rq[i]->load.weight;
                usd_rq_weight[i] = weight;
 
-               rq_weight += weight;
                /*
                 * If there are currently no tasks on the cpu pretend there
                 * is one of average load so that when a new task gets to
@@ -1657,13 +1641,10 @@ static int tg_shares_up(struct task_group *tg, void *data)
                if (!weight)
                        weight = NICE_0_LOAD;
 
-               sum_weight += weight;
+               rq_weight += weight;
                shares += tg->cfs_rq[i]->shares;
        }
 
-       if (!rq_weight)
-               rq_weight = sum_weight;
-
        if ((!shares && rq_weight) || shares > tg->shares)
                shares = tg->shares;
 
@@ -1730,6 +1711,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 static void update_h_load(long cpu)
 {
+       if (root_task_group_empty())
+               return;
+
        walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
@@ -1831,20 +1815,6 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 static void calc_load_account_active(struct rq *this_rq);
 static void update_sysctl(void);
 
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-       set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-       /*
-        * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-        * successfuly executed on another CPU. We must ensure that updates of
-        * per-task data have been completed by this moment.
-        */
-       smp_wmb();
-       task_thread_info(p)->cpu = cpu;
-#endif
-}
-
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -1894,14 +1864,13 @@ static void update_avg(u64 *avg, u64 sample)
        *avg += diff >> 3;
 }
 
-static void
-enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
        if (wakeup)
                p->se.start_runtime = p->se.sum_exec_runtime;
 
        sched_info_queued(p);
-       p->sched_class->enqueue_task(rq, p, wakeup, head);
+       p->sched_class->enqueue_task(rq, p, wakeup);
        p->se.on_rq = 1;
 }
 
@@ -1977,7 +1946,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;
 
-       enqueue_task(rq, p, wakeup, false);
+       enqueue_task(rq, p, wakeup);
        inc_nr_running(rq);
 }
 
@@ -2002,6 +1971,20 @@ inline int task_curr(const struct task_struct *p)
        return cpu_curr(task_cpu(p)) == p;
 }
 
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+       set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+       /*
+        * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+        * successfuly executed on another CPU. We must ensure that updates of
+        * per-task data have been completed by this moment.
+        */
+       smp_wmb();
+       task_thread_info(p)->cpu = cpu;
+#endif
+}
+
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio, int running)
@@ -2028,15 +2011,21 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long flags;
+
        /* Must have done schedule() in kthread() before we set_task_cpu */
        if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
                WARN_ON(1);
                return;
        }
 
+       spin_lock_irqsave(&rq->lock, flags);
+       set_task_cpu(p, cpu);
        p->cpus_allowed = cpumask_of_cpu(cpu);
        p->rt.nr_cpus_allowed = 1;
        p->flags |= PF_THREAD_BOUND;
+       spin_unlock_irqrestore(&rq->lock, flags);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -2074,23 +2063,35 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
        int old_cpu = task_cpu(p);
+       struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
+       struct cfs_rq *old_cfsrq = task_cfs_rq(p),
+                     *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
+       u64 clock_offset;
 
-#ifdef CONFIG_SCHED_DEBUG
-       /*
-        * We should never call set_task_cpu() on a blocked task,
-        * ttwu() will sort out the placement.
-        */
-       WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
-                       !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
-#endif
+       clock_offset = old_rq->clock - new_rq->clock;
 
        trace_sched_migrate_task(p, new_cpu);
 
+#ifdef CONFIG_SCHEDSTATS
+       if (p->se.wait_start)
+               p->se.wait_start -= clock_offset;
+       if (p->se.sleep_start)
+               p->se.sleep_start -= clock_offset;
+       if (p->se.block_start)
+               p->se.block_start -= clock_offset;
+#endif
        if (old_cpu != new_cpu) {
                p->se.nr_migrations++;
+               new_rq->nr_migrations_in++;
+#ifdef CONFIG_SCHEDSTATS
+               if (task_hot(p, old_rq->clock, NULL))
+                       schedstat_inc(p, se.nr_forced2_migrations);
+#endif
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
                                     1, 1, NULL, 0);
        }
+       p->se.vruntime -= old_cfsrq->min_vruntime -
+                                        new_cfsrq->min_vruntime;
 
        __set_task_cpu(p, new_cpu);
 }
@@ -2115,10 +2116,12 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 
        /*
         * If the task is not on a runqueue (and not running), then
-        * the next wake-up will properly place the task.
+        * it is sufficient to simply update the task's cpu field.
         */
-       if (!p->se.on_rq && !task_running(rq, p))
+       if (!p->se.on_rq && !task_running(rq, p)) {
+               set_task_cpu(p, dest_cpu);
                return 0;
+       }
 
        init_completion(&req->done);
        req->task = p;
@@ -2323,69 +2326,6 @@ void task_oncpu_function_call(struct task_struct *p,
        preempt_enable();
 }
 
-#ifdef CONFIG_SMP
-/*
- * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
- */
-static int select_fallback_rq(int cpu, struct task_struct *p)
-{
-       int dest_cpu;
-       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
-
-       /* Look for allowed, online CPU in same node. */
-       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-               if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
-                       return dest_cpu;
-
-       /* Any allowed, online CPU? */
-       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-       if (dest_cpu < nr_cpu_ids)
-               return dest_cpu;
-
-       /* No more Mr. Nice Guy. */
-       if (unlikely(dest_cpu >= nr_cpu_ids)) {
-               dest_cpu = cpuset_cpus_allowed_fallback(p);
-               /*
-                * Don't tell them about moving exiting tasks or
-                * kernel threads (both mm NULL), since they never
-                * leave kernel.
-                */
-               if (p->mm && printk_ratelimit()) {
-                       printk(KERN_INFO "process %d (%s) no "
-                              "longer affine to cpu%d\n",
-                              task_pid_nr(p), p->comm, cpu);
-               }
-       }
-
-       return dest_cpu;
-}
-
-/*
- * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
- */
-static inline
-int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
-{
-       int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
-
-       /*
-        * In order not to call set_task_cpu() on a blocking task we need
-        * to rely on ttwu() to place the task on a valid ->cpus_allowed
-        * cpu.
-        *
-        * Since this is common to all placement strategies, this lives here.
-        *
-        * [ this allows ->select_task() to simply return task_cpu(p) and
-        *   not worry about this generic constraint ]
-        */
-       if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-                    !cpu_online(cpu)))
-               cpu = select_fallback_rq(task_cpu(p), p);
-
-       return cpu;
-}
-#endif
-
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2434,34 +2374,22 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         *
         * First fix up the nr_uninterruptible count:
         */
-       if (task_contributes_to_load(p)) {
-               if (likely(cpu_online(orig_cpu)))
-                       rq->nr_uninterruptible--;
-               else
-                       this_rq()->nr_uninterruptible--;
-       }
+       if (task_contributes_to_load(p))
+               rq->nr_uninterruptible--;
        p->state = TASK_WAKING;
+       task_rq_unlock(rq, &flags);
 
-       if (p->sched_class->task_waking)
-               p->sched_class->task_waking(rq, p);
-
-       cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
        if (cpu != orig_cpu)
                set_task_cpu(p, cpu);
-       __task_rq_unlock(rq);
 
-       rq = cpu_rq(cpu);
-       spin_lock(&rq->lock);
-       update_rq_clock(rq);
+       rq = task_rq_lock(p, &flags);
+
+       if (rq != orig_rq)
+               update_rq_clock(rq);
 
-       /*
-        * We migrated the task without holding either rq->lock, however
-        * since the task is not on the task list itself, nobody else
-        * will try and migrate the task, hence the rq should match the
-        * cpu we just moved it to.
-        */
-       WARN_ON(task_cpu(p) != cpu);
        WARN_ON(p->state != TASK_WAKING);
+       cpu = task_cpu(p);
 
 #ifdef CONFIG_SCHEDSTATS
        schedstat_inc(rq, ttwu_count);
@@ -2514,8 +2442,8 @@ out_running:
 
        p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_woken)
-               p->sched_class->task_woken(rq, p);
+       if (p->sched_class->task_wake_up)
+               p->sched_class->task_wake_up(rq, p);
 
        if (unlikely(rq->idle_stamp)) {
                u64 delta = rq->clock - rq->idle_stamp;
@@ -2595,6 +2523,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.nr_failed_migrations_running      = 0;
        p->se.nr_failed_migrations_hot          = 0;
        p->se.nr_forced_migrations              = 0;
+       p->se.nr_forced2_migrations             = 0;
 
        p->se.nr_wakeups                        = 0;
        p->se.nr_wakeups_sync                   = 0;
@@ -2615,6 +2544,14 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
+
+       /*
+        * We mark the process as running here, but have not actually
+        * inserted it onto the runqueue yet. This guarantees that
+        * nobody will actually run it, and a signal or other external
+        * event cannot wake it up and insert it on the runqueue either.
+        */
+       p->state = TASK_RUNNING;
 }
 
 /*
@@ -2625,12 +2562,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
        int cpu = get_cpu();
 
        __sched_fork(p);
-       /*
-        * We mark the process as running here. This guarantees that
-        * nobody will actually run it, and a signal or other external
-        * event cannot wake it up and insert it on the runqueue either.
-        */
-       p->state = TASK_RUNNING;
 
        /*
         * Revert to default priority/policy on fork if requested.
@@ -2662,9 +2593,9 @@ void sched_fork(struct task_struct *p, int clone_flags)
        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;
 
-       if (p->sched_class->task_fork)
-               p->sched_class->task_fork(p);
-
+#ifdef CONFIG_SMP
+       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+#endif
        set_task_cpu(p, cpu);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2694,38 +2625,28 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
        unsigned long flags;
        struct rq *rq;
-       int cpu = get_cpu();
-
-#ifdef CONFIG_SMP
-       rq = task_rq_lock(p, &flags);
-       p->state = TASK_WAKING;
-
-       /*
-        * Fork balancing, do it here and not earlier because:
-        *  - cpus_allowed can change in the fork path
-        *  - any previously selected cpu might disappear through hotplug
-        *
-        * We set TASK_WAKING so that select_task_rq() can drop rq->lock
-        * without people poking at ->cpus_allowed.
-        */
-       cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
-       set_task_cpu(p, cpu);
-
-       p->state = TASK_RUNNING;
-       task_rq_unlock(rq, &flags);
-#endif
 
        rq = task_rq_lock(p, &flags);
+       BUG_ON(p->state != TASK_RUNNING);
        update_rq_clock(rq);
-       activate_task(rq, p, 0);
+
+       if (!p->sched_class->task_new || !current->se.on_rq) {
+               activate_task(rq, p, 0);
+       } else {
+               /*
+                * Let the scheduling class do new task startup
+                * management (if any):
+                */
+               p->sched_class->task_new(rq, p);
+               inc_nr_running(rq);
+       }
        trace_sched_wakeup_new(rq, p, 1);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_woken)
-               p->sched_class->task_woken(rq, p);
+       if (p->sched_class->task_wake_up)
+               p->sched_class->task_wake_up(rq, p);
 #endif
        task_rq_unlock(rq, &flags);
-       put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -3111,6 +3032,15 @@ static void calc_load_account_active(struct rq *this_rq)
        }
 }
 
+/*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_migrations(int cpu)
+{
+       return cpu_rq(cpu)->nr_migrations_in;
+}
+
 /*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
@@ -3193,28 +3123,24 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
+ * If dest_cpu is allowed for this process, migrate the task to it.
+ * This is accomplished by forcing the cpu_allowed mask to only
+ * allow dest_cpu, which will force the cpu onto dest_cpu. Then
+ * the cpu_allowed mask is restored.
  */
-void sched_exec(void)
+static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 {
-       struct task_struct *p = current;
        struct migration_req req;
        unsigned long flags;
        struct rq *rq;
-       int dest_cpu;
 
        rq = task_rq_lock(p, &flags);
-       dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
-       if (dest_cpu == smp_processor_id())
-               goto unlock;
+       if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
+           || unlikely(!cpu_active(dest_cpu)))
+               goto out;
 
-       /*
-        * select_task_rq() can race against ->cpus_allowed
-        */
-       if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-           likely(cpu_active(dest_cpu)) &&
-           migrate_task(p, dest_cpu, &req)) {
+       /* force the process onto the specified CPU */
+       if (migrate_task(p, dest_cpu, &req)) {
                /* Need to wait for migration thread (might exit: take ref). */
                struct task_struct *mt = rq->migration_thread;
 
@@ -3226,10 +3152,23 @@ void sched_exec(void)
 
                return;
        }
-unlock:
+out:
        task_rq_unlock(rq, &flags);
 }
 
+/*
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
+ */
+void sched_exec(void)
+{
+       int new_cpu, this_cpu = get_cpu();
+       new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
+       put_cpu();
+       if (new_cpu != this_cpu)
+               sched_migrate_task(current, new_cpu);
+}
+
 /*
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
@@ -3463,7 +3402,6 @@ struct sd_lb_stats {
        unsigned long max_load;
        unsigned long busiest_load_per_task;
        unsigned long busiest_nr_running;
-       unsigned long busiest_group_capacity;
 
        int group_imb; /* Is there imbalance in this sd */
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3677,7 +3615,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
 
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = sd->span_weight;
+       unsigned long weight = cpumask_weight(sched_domain_span(sd));
        unsigned long smt_gain = sd->smt_gain;
 
        smt_gain /= weight;
@@ -3710,7 +3648,7 @@ unsigned long scale_rt_power(int cpu)
 
 static void update_cpu_power(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = sd->span_weight;
+       unsigned long weight = cpumask_weight(sched_domain_span(sd));
        unsigned long power = SCHED_LOAD_SCALE;
        struct sched_group *sdg = sd->groups;
 
@@ -3783,7 +3721,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        unsigned long load, max_cpu_load, min_cpu_load;
        int i;
        unsigned int balance_cpu = -1, first_idle_cpu = 0;
-       unsigned long avg_load_per_task = 0;
+       unsigned long sum_avg_load_per_task;
+       unsigned long avg_load_per_task;
 
        if (local_group) {
                balance_cpu = group_first_cpu(group);
@@ -3792,6 +3731,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        }
 
        /* Tally up the load of all CPUs in the group */
+       sum_avg_load_per_task = avg_load_per_task = 0;
        max_cpu_load = 0;
        min_cpu_load = ~0UL;
 
@@ -3821,6 +3761,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
                sgs->sum_nr_running += rq->nr_running;
                sgs->sum_weighted_load += weighted_cpuload(i);
 
+               sum_avg_load_per_task += cpu_avg_load_per_task(i);
        }
 
        /*
@@ -3838,6 +3779,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        /* Adjust by relative CPU power of the group */
        sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
+
        /*
         * Consider the group unbalanced when the imbalance is larger
         * than the average weight of two tasks.
@@ -3847,8 +3789,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
         *      normalized nr_running number somewhere that negates
         *      the hierarchy?
         */
-       if (sgs->sum_nr_running)
-               avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+       avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
+               group->cpu_power;
 
        if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
                sgs->group_imb = 1;
@@ -3917,7 +3859,6 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                        sds->max_load = sgs.avg_load;
                        sds->busiest = group;
                        sds->busiest_nr_running = sgs.sum_nr_running;
-                       sds->busiest_group_capacity = sgs.group_capacity;
                        sds->busiest_load_per_task = sgs.sum_weighted_load;
                        sds->group_imb = sgs.group_imb;
                }
@@ -3940,7 +3881,6 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 {
        unsigned long tmp, pwr_now = 0, pwr_move = 0;
        unsigned int imbn = 2;
-       unsigned long scaled_busy_load_per_task;
 
        if (sds->this_nr_running) {
                sds->this_load_per_task /= sds->this_nr_running;
@@ -3951,12 +3891,8 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
                sds->this_load_per_task =
                        cpu_avg_load_per_task(this_cpu);
 
-       scaled_busy_load_per_task = sds->busiest_load_per_task
-                                                * SCHED_LOAD_SCALE;
-       scaled_busy_load_per_task /= sds->busiest->cpu_power;
-
-       if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
-                       (scaled_busy_load_per_task * imbn)) {
+       if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
+                       sds->busiest_load_per_task * imbn) {
                *imbalance = sds->busiest_load_per_task;
                return;
        }
@@ -4007,14 +3943,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                unsigned long *imbalance)
 {
-       unsigned long max_pull, load_above_capacity = ~0UL;
-
-       sds->busiest_load_per_task /= sds->busiest_nr_running;
-       if (sds->group_imb) {
-               sds->busiest_load_per_task =
-                       min(sds->busiest_load_per_task, sds->avg_load);
-       }
-
+       unsigned long max_pull;
        /*
         * In the presence of smp nice balancing, certain scenarios can have
         * max load less than avg load(as we skip the groups at or below
@@ -4025,29 +3954,9 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                return fix_small_imbalance(sds, this_cpu, imbalance);
        }
 
-       if (!sds->group_imb) {
-               /*
-                * Don't want to pull so many tasks that a group would go idle.
-                */
-               load_above_capacity = (sds->busiest_nr_running -
-                                               sds->busiest_group_capacity);
-
-               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
-
-               load_above_capacity /= sds->busiest->cpu_power;
-       }
-
-       /*
-        * We're trying to get all the cpus to the average_load, so we don't
-        * want to push ourselves above the average load, nor do we wish to
-        * reduce the max loaded cpu below the average load. At the same time,
-        * we also don't want to reduce the group load below the group capacity
-        * (so that we can implement power-savings policies etc). Thus we look
-        * for the minimum possible imbalance.
-        * Be careful of negative numbers as they'll appear as very large values
-        * with unsigned longs.
-        */
-       max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
+       /* Don't want to pull so many tasks that a group would go idle */
+       max_pull = min(sds->max_load - sds->avg_load,
+                       sds->max_load - sds->busiest_load_per_task);
 
        /* How much load to actually move to equalise the imbalance */
        *imbalance = min(max_pull * sds->busiest->cpu_power,
@@ -4115,6 +4024,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * 4) This group is more busy than the avg busieness at this
         *    sched_domain.
         * 5) The imbalance is within the specified limit.
+        * 6) Any rebalance would lead to ping-pong
         */
        if (balance && !(*balance))
                goto ret;
@@ -4133,6 +4043,25 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
        if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
                goto out_balanced;
 
+       sds.busiest_load_per_task /= sds.busiest_nr_running;
+       if (sds.group_imb)
+               sds.busiest_load_per_task =
+                       min(sds.busiest_load_per_task, sds.avg_load);
+
+       /*
+        * We're trying to get all the cpus to the average_load, so we don't
+        * want to push ourselves above the average load, nor do we wish to
+        * reduce the max loaded cpu below the average load, as either of these
+        * actions would just result in more rebalancing later, and ping-pong
+        * tasks around. Thus we look for the minimum possible imbalance.
+        * Negative imbalances (*we* are more loaded than anyone else) will
+        * be counted as no imbalance for these purposes -- we can't fix that
+        * by pulling tasks to us. Be careful of negative numbers as they'll
+        * appear as very large values with unsigned longs.
+        */
+       if (sds.max_load <= sds.busiest_load_per_task)
+               goto out_balanced;
+
        /* Looks like there is an imbalance. Compute it */
        calculate_imbalance(&sds, this_cpu, imbalance);
        return sds.busiest;
@@ -4169,23 +4098,12 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
                        continue;
 
                rq = cpu_rq(i);
-               wl = weighted_cpuload(i);
+               wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
+               wl /= power;
 
-               /*
-                * When comparing with imbalance, use weighted_cpuload()
-                * which is not scaled with the cpu power.
-                */
                if (capacity && rq->nr_running == 1 && wl > imbalance)
                        continue;
 
-               /*
-                * For the load comparisons with the other cpu's, consider
-                * the weighted_cpuload() scaled with the cpu power, so that
-                * the load can be moved away from the cpu that is potentially
-                * running at a lower capacity.
-                */
-               wl = (wl * SCHED_LOAD_SCALE) / power;
-
                if (wl > max_load) {
                        max_load = wl;
                        busiest = rq;
@@ -5271,90 +5189,45 @@ cputime_t task_stime(struct task_struct *p)
 {
        return p->stime;
 }
-
-void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
-       struct task_cputime cputime;
-
-       thread_group_cputime(p, &cputime);
-
-       *ut = cputime.utime;
-       *st = cputime.stime;
-}
 #else
-
-#ifndef nsecs_to_cputime
-# define nsecs_to_cputime(__nsecs) \
-       msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
-#endif
-
 cputime_t task_utime(struct task_struct *p)
 {
-       cputime_t utime = p->utime, total = utime + p->stime;
+       clock_t utime = cputime_to_clock_t(p->utime),
+               total = utime + cputime_to_clock_t(p->stime);
        u64 temp;
 
        /*
         * Use CFS's precise accounting:
         */
-       temp = (u64)nsecs_to_cputime(p->se.sum_exec_runtime);
+       temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
 
        if (total) {
                temp *= utime;
                do_div(temp, total);
        }
-       utime = (cputime_t)temp;
+       utime = (clock_t)temp;
 
-       p->prev_utime = max(p->prev_utime, utime);
+       p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
        return p->prev_utime;
 }
 
 cputime_t task_stime(struct task_struct *p)
 {
-       cputime_t stime;
+       clock_t stime;
 
        /*
         * Use CFS's precise accounting. (we subtract utime from
         * the total, to make sure the total observed by userspace
         * grows monotonically - apps rely on that):
         */
-       stime = nsecs_to_cputime(p->se.sum_exec_runtime) - task_utime(p);
+       stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
+                       cputime_to_clock_t(task_utime(p));
 
        if (stime >= 0)
-               p->prev_stime = max(p->prev_stime, stime);
+               p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
 
        return p->prev_stime;
 }
-
-/*
- * Must be called with siglock held.
- */
-void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
-       struct signal_struct *sig = p->signal;
-       struct task_cputime cputime;
-       cputime_t rtime, utime, total;
-
-       thread_group_cputime(p, &cputime);
-
-       total = cputime_add(cputime.utime, cputime.stime);
-       rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
-
-       if (total) {
-               u64 temp = rtime;
-
-               temp *= cputime.utime;
-               do_div(temp, total);
-               utime = (cputime_t)temp;
-       } else
-               utime = rtime;
-
-       sig->prev_utime = max(sig->prev_utime, utime);
-       sig->prev_stime = max(sig->prev_stime,
-                             cputime_sub(rtime, sig->prev_utime));
-
-       *ut = sig->prev_utime;
-       *st = sig->prev_stime;
-}
 #endif
 
 inline cputime_t task_gtime(struct task_struct *p)
@@ -5646,7 +5519,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
         * the mutex owner just released it and exited.
         */
        if (probe_kernel_address(&owner->cpu, cpu))
-               return 0;
+               goto out;
 #else
        cpu = owner->cpu;
 #endif
@@ -5656,14 +5529,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
         * the cpu field may no longer be valid.
         */
        if (cpu >= nr_cpumask_bits)
-               return 0;
+               goto out;
 
        /*
         * We need to validate that we can do a
         * get_cpu() and that we have the percpu area.
         */
        if (!cpu_online(cpu))
-               return 0;
+               goto out;
 
        rq = cpu_rq(cpu);
 
@@ -5682,7 +5555,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
                cpu_relax();
        }
-
+out:
        return 1;
 }
 #endif
@@ -6030,15 +5903,14 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  */
 bool try_wait_for_completion(struct completion *x)
 {
-       unsigned long flags;
        int ret = 1;
 
-       spin_lock_irqsave(&x->wait.lock, flags);
+       spin_lock_irq(&x->wait.lock);
        if (!x->done)
                ret = 0;
        else
                x->done--;
-       spin_unlock_irqrestore(&x->wait.lock, flags);
+       spin_unlock_irq(&x->wait.lock);
        return ret;
 }
 EXPORT_SYMBOL(try_wait_for_completion);
@@ -6053,13 +5925,12 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
-       unsigned long flags;
        int ret = 1;
 
-       spin_lock_irqsave(&x->wait.lock, flags);
+       spin_lock_irq(&x->wait.lock);
        if (!x->done)
                ret = 0;
-       spin_unlock_irqrestore(&x->wait.lock, flags);
+       spin_unlock_irq(&x->wait.lock);
        return ret;
 }
 EXPORT_SYMBOL(completion_done);
@@ -6127,7 +5998,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        unsigned long flags;
        int oldprio, on_rq, running;
        struct rq *rq;
-       const struct sched_class *prev_class;
+       const struct sched_class *prev_class = p->sched_class;
 
        BUG_ON(prio < 0 || prio > MAX_PRIO);
 
@@ -6135,7 +6006,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        update_rq_clock(rq);
 
        oldprio = p->prio;
-       prev_class = p->sched_class;
        on_rq = p->se.on_rq;
        running = task_current(rq, p);
        if (on_rq)
@@ -6153,7 +6023,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        if (running)
                p->sched_class->set_curr_task(rq);
        if (on_rq) {
-               enqueue_task(rq, p, 0, oldprio < prio);
+               enqueue_task(rq, p, 0);
 
                check_class_changed(rq, p, prev_class, oldprio, running);
        }
@@ -6197,7 +6067,7 @@ void set_user_nice(struct task_struct *p, long nice)
        delta = p->prio - old_prio;
 
        if (on_rq) {
-               enqueue_task(rq, p, 0, false);
+               enqueue_task(rq, p, 0);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
@@ -6363,7 +6233,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 {
        int retval, oldprio, oldpolicy = -1, on_rq, running;
        unsigned long flags;
-       const struct sched_class *prev_class;
+       const struct sched_class *prev_class = p->sched_class;
        struct rq *rq;
        int reset_on_fork;
 
@@ -6477,7 +6347,6 @@ recheck:
        p->sched_reset_on_fork = reset_on_fork;
 
        oldprio = p->prio;
-       prev_class = p->sched_class;
        __setscheduler(rq, p, policy, param->sched_priority);
 
        if (running)
@@ -6588,7 +6457,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
                return -EINVAL;
 
        retval = -ESRCH;
-       rcu_read_lock();
+       read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
        if (p) {
                retval = security_task_getscheduler(p);
@@ -6596,7 +6465,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
                        retval = p->policy
                                | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
        }
-       rcu_read_unlock();
+       read_unlock(&tasklist_lock);
        return retval;
 }
 
@@ -6614,7 +6483,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
        if (!param || pid < 0)
                return -EINVAL;
 
-       rcu_read_lock();
+       read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
        retval = -ESRCH;
        if (!p)
@@ -6625,7 +6494,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
                goto out_unlock;
 
        lp.sched_priority = p->rt_priority;
-       rcu_read_unlock();
+       read_unlock(&tasklist_lock);
 
        /*
         * This one might sleep, we cannot do it with a spinlock held ...
@@ -6635,7 +6504,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
        return retval;
 
 out_unlock:
-       rcu_read_unlock();
+       read_unlock(&tasklist_lock);
        return retval;
 }
 
@@ -6646,18 +6515,22 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        int retval;
 
        get_online_cpus();
-       rcu_read_lock();
+       read_lock(&tasklist_lock);
 
        p = find_process_by_pid(pid);
        if (!p) {
-               rcu_read_unlock();
+               read_unlock(&tasklist_lock);
                put_online_cpus();
                return -ESRCH;
        }
 
-       /* Prevent p going away */
+       /*
+        * It is not safe to call set_cpus_allowed with the
+        * tasklist_lock held. We will bump the task_struct's
+        * usage count and then drop tasklist_lock.
+        */
        get_task_struct(p);
-       rcu_read_unlock();
+       read_unlock(&tasklist_lock);
 
        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
                retval = -ENOMEM;
@@ -6738,12 +6611,10 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
        struct task_struct *p;
-       unsigned long flags;
-       struct rq *rq;
        int retval;
 
        get_online_cpus();
-       rcu_read_lock();
+       read_lock(&tasklist_lock);
 
        retval = -ESRCH;
        p = find_process_by_pid(pid);
@@ -6754,12 +6625,10 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
        if (retval)
                goto out_unlock;
 
-       rq = task_rq_lock(p, &flags);
        cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
-       task_rq_unlock(rq, &flags);
 
 out_unlock:
-       rcu_read_unlock();
+       read_unlock(&tasklist_lock);
        put_online_cpus();
 
        return retval;
@@ -6777,9 +6646,7 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
        int ret;
        cpumask_var_t mask;
 
-       if ((len * BITS_PER_BYTE) < nr_cpu_ids)
-               return -EINVAL;
-       if (len & (sizeof(unsigned long)-1))
+       if (len < cpumask_size())
                return -EINVAL;
 
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -6787,12 +6654,10 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 
        ret = sched_getaffinity(pid, mask);
        if (ret == 0) {
-               size_t retlen = min_t(size_t, len, cpumask_size());
-
-               if (copy_to_user(user_mask_ptr, mask, retlen))
+               if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
                        ret = -EFAULT;
                else
-                       ret = retlen;
+                       ret = cpumask_size();
        }
        free_cpumask_var(mask);
 
@@ -6998,8 +6863,6 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 {
        struct task_struct *p;
        unsigned int time_slice;
-       unsigned long flags;
-       struct rq *rq;
        int retval;
        struct timespec t;
 
@@ -7007,7 +6870,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                return -EINVAL;
 
        retval = -ESRCH;
-       rcu_read_lock();
+       read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
        if (!p)
                goto out_unlock;
@@ -7016,17 +6879,15 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        if (retval)
                goto out_unlock;
 
-       rq = task_rq_lock(p, &flags);
-       time_slice = p->sched_class->get_rr_interval(rq, p);
-       task_rq_unlock(rq, &flags);
+       time_slice = p->sched_class->get_rr_interval(p);
 
-       rcu_read_unlock();
+       read_unlock(&tasklist_lock);
        jiffies_to_timespec(time_slice, &t);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
        return retval;
 
 out_unlock:
-       rcu_read_unlock();
+       read_unlock(&tasklist_lock);
        return retval;
 }
 
@@ -7117,7 +6978,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        spin_lock_irqsave(&rq->lock, flags);
 
        __sched_fork(idle);
-       idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
@@ -7212,19 +7072,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        struct rq *rq;
        int ret = 0;
 
-       /*
-        * Serialize against TASK_WAKING so that ttwu() and wunt() can
-        * drop the rq->lock and still rely on ->cpus_allowed.
-        */
-again:
-       while (task_is_waking(p))
-               cpu_relax();
        rq = task_rq_lock(p, &flags);
-       if (task_is_waking(p)) {
-               task_rq_unlock(rq, &flags);
-               goto again;
-       }
-
        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
                ret = -EINVAL;
                goto out;
@@ -7253,7 +7101,7 @@ again:
 
                get_task_struct(mt);
                task_rq_unlock(rq, &flags);
-               wake_up_process(mt);
+               wake_up_process(rq->migration_thread);
                put_task_struct(mt);
                wait_for_completion(&req.done);
                tlb_migrate_finish(p->mm);
@@ -7280,7 +7128,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
        struct rq *rq_dest, *rq_src;
-       int ret = 0;
+       int ret = 0, on_rq;
 
        if (unlikely(!cpu_active(dest_cpu)))
                return ret;
@@ -7296,13 +7144,12 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                goto fail;
 
-       /*
-        * If we're not on a rq, the next wake-up will ensure we're
-        * placed properly.
-        */
-       if (p->se.on_rq) {
+       on_rq = p->se.on_rq;
+       if (on_rq)
                deactivate_task(rq_src, p, 0);
-               set_task_cpu(p, dest_cpu);
+
+       set_task_cpu(p, dest_cpu);
+       if (on_rq) {
                activate_task(rq_dest, p, 0);
                check_preempt_curr(rq_dest, p, 0);
        }
@@ -7381,29 +7228,57 @@ static int migration_thread(void *data)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+
+static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
+{
+       int ret;
+
+       local_irq_disable();
+       ret = __migrate_task(p, src_cpu, dest_cpu);
+       local_irq_enable();
+       return ret;
+}
+
 /*
  * Figure out where task on dead CPU should go, use force if necessary.
  */
-void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
-       struct rq *rq = cpu_rq(dead_cpu);
-       int needs_cpu, uninitialized_var(dest_cpu);
-       unsigned long flags;
+       int dest_cpu;
+       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
-       local_irq_save(flags);
+again:
+       /* Look for allowed, online CPU in same node. */
+       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+               if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+                       goto move;
 
-       spin_lock(&rq->lock);
-       needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
-       if (needs_cpu)
-               dest_cpu = select_fallback_rq(dead_cpu, p);
-       spin_unlock(&rq->lock);
-       /*
-        * It can only fail if we race with set_cpus_allowed(),
-        * in the racer should migrate the task anyway.
-        */
-       if (needs_cpu)
-               __migrate_task(p, dead_cpu, dest_cpu);
-       local_irq_restore(flags);
+       /* Any allowed, online CPU? */
+       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+       if (dest_cpu < nr_cpu_ids)
+               goto move;
+
+       /* No more Mr. Nice Guy. */
+       if (dest_cpu >= nr_cpu_ids) {
+               cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+               /*
+                * Don't tell them about moving exiting tasks or
+                * kernel threads (both mm NULL), since they never
+                * leave kernel.
+                */
+               if (p->mm && printk_ratelimit()) {
+                       printk(KERN_INFO "process %d (%s) no "
+                              "longer affine to cpu%d\n",
+                              task_pid_nr(p), p->comm, dead_cpu);
+               }
+       }
+
+move:
+       /* It can have affinity changed while we were choosing. */
+       if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
+               goto again;
 }
 
 /*
@@ -7751,9 +7626,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
        unsigned long flags;
        struct rq *rq;
 
-       switch (action & ~CPU_TASKS_FROZEN) {
+       switch (action) {
 
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
                if (IS_ERR(p))
                        return NOTIFY_BAD;
@@ -7768,6 +7644,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                break;
 
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                /* Strictly unnecessary, as first user will wake it. */
                wake_up_process(cpu_rq(cpu)->migration_thread);
 
@@ -7784,6 +7661,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                if (!cpu_rq(cpu)->migration_thread)
                        break;
                /* Unbind it from offline cpu so it can run. Fall thru. */
@@ -7794,22 +7672,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                cpu_rq(cpu)->migration_thread = NULL;
                break;
 
-       case CPU_POST_DEAD:
-               /*
-                * Bring the migration thread down in CPU_POST_DEAD event,
-                * since the timers should have got migrated by now and thus
-                * we should not see a deadlock between trying to kill the
-                * migration thread and the sched_rt_period_timer.
-                */
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
+               migrate_live_tasks(cpu);
                rq = cpu_rq(cpu);
                kthread_stop(rq->migration_thread);
                put_task_struct(rq->migration_thread);
                rq->migration_thread = NULL;
-               break;
-
-       case CPU_DEAD:
-               migrate_live_tasks(cpu);
-               rq = cpu_rq(cpu);
                /* Idle task back to normal (off runqueue, low prio) */
                spin_lock_irq(&rq->lock);
                update_rq_clock(rq);
@@ -7818,6 +7688,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                rq->idle->sched_class = &idle_sched_class;
                migrate_dead_tasks(cpu);
                spin_unlock_irq(&rq->lock);
+               cpuset_unlock();
                migrate_nr_uninterruptible(rq);
                BUG_ON(rq->nr_running != 0);
                calc_global_load_remove(rq);
@@ -7841,6 +7712,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                break;
 
        case CPU_DYING:
+       case CPU_DYING_FROZEN:
                /* Update our root-domain */
                rq = cpu_rq(cpu);
                spin_lock_irqsave(&rq->lock, flags);
@@ -8160,9 +8032,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        struct rq *rq = cpu_rq(cpu);
        struct sched_domain *tmp;
 
-       for (tmp = sd; tmp; tmp = tmp->parent)
-               tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
-
        /* Remove the sched domains which do not contribute to scheduling. */
        for (tmp = sd; tmp; ) {
                struct sched_domain *parent = tmp->parent;
@@ -10161,13 +10030,13 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->moved_group)
-               tsk->sched_class->moved_group(tsk, on_rq);
+               tsk->sched_class->moved_group(tsk);
 #endif
 
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
        if (on_rq)
-               enqueue_task(rq, tsk, 0, false);
+               enqueue_task(rq, tsk, 0);
 
        task_rq_unlock(rq, &flags);
 }
@@ -10947,23 +10816,6 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
        rcu_read_unlock();
 }
 
-/*
- * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
- * in cputime_t units. As a result, cpuacct_update_stats calls
- * percpu_counter_add with values large enough to always overflow the
- * per cpu batch limit causing bad SMP scalability.
- *
- * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
- * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
- * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
- */
-#ifdef CONFIG_SMP
-#define CPUACCT_BATCH  \
-       min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
-#else
-#define CPUACCT_BATCH  0
-#endif
-
 /*
  * Charge the system/user time to the task's accounting group.
  */
@@ -10971,7 +10823,6 @@ static void cpuacct_update_stats(struct task_struct *tsk,
                enum cpuacct_stat_index idx, cputime_t val)
 {
        struct cpuacct *ca;
-       int batch = CPUACCT_BATCH;
 
        if (unlikely(!cpuacct_subsys.active))
                return;
@@ -10980,7 +10831,7 @@ static void cpuacct_update_stats(struct task_struct *tsk,
        ca = task_ca(tsk);
 
        do {
-               __percpu_counter_add(&ca->cpustat[idx], val, batch);
+               percpu_counter_add(&ca->cpustat[idx], val);
                ca = ca->parent;
        } while (ca);
        rcu_read_unlock();
index 6f836a89375ba91f0789ba7063339909428852a7..6988cf08f705d3bbd7b8cd16a43fe70cd32b795e 100644 (file)
@@ -423,6 +423,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        P(se.nr_failed_migrations_running);
        P(se.nr_failed_migrations_hot);
        P(se.nr_forced_migrations);
+       P(se.nr_forced2_migrations);
        P(se.nr_wakeups);
        P(se.nr_wakeups_sync);
        P(se.nr_wakeups_migrate);
@@ -498,6 +499,7 @@ void proc_sched_set_task(struct task_struct *p)
        p->se.nr_failed_migrations_running      = 0;
        p->se.nr_failed_migrations_hot          = 0;
        p->se.nr_forced_migrations              = 0;
+       p->se.nr_forced2_migrations             = 0;
        p->se.nr_wakeups                        = 0;
        p->se.nr_wakeups_sync                   = 0;
        p->se.nr_wakeups_migrate                = 0;
index 01e311e6b47fd6b9368e49327f2467d8d148bc60..d80812d39d6238378ccac64538056d762128f084 100644 (file)
@@ -488,7 +488,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = calc_delta_fair(delta_exec, curr);
-
        curr->vruntime += delta_exec_weighted;
        update_min_vruntime(cfs_rq);
 }
@@ -744,26 +743,16 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
        se->vruntime = vruntime;
 }
 
-#define ENQUEUE_WAKEUP 1
-#define ENQUEUE_MIGRATE 2
-
 static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 {
-       /*
-        * Update the normalized vruntime before updating min_vruntime
-        * through callig update_curr().
-        */
-       if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
-               se->vruntime += cfs_rq->min_vruntime;
-
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
        account_entity_enqueue(cfs_rq, se);
 
-       if (flags & ENQUEUE_WAKEUP) {
+       if (wakeup) {
                place_entity(cfs_rq, se, 0);
                enqueue_sleeper(cfs_rq, se);
        }
@@ -817,14 +806,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
                __dequeue_entity(cfs_rq, se);
        account_entity_dequeue(cfs_rq, se);
        update_min_vruntime(cfs_rq);
-
-       /*
-        * Normalize the entity after updating the min_vruntime because the
-        * update can refer to the ->curr item and we need to reflect this
-        * movement in our normalized position.
-        */
-       if (!sleep)
-               se->vruntime -= cfs_rq->min_vruntime;
 }
 
 /*
@@ -1031,24 +1012,17 @@ static inline void hrtick_update(struct rq *rq)
  * increased. Here we update the fair scheduling stats and
  * then put the task into the rbtree:
  */
-static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
-       int flags = 0;
-
-       if (wakeup)
-               flags |= ENQUEUE_WAKEUP;
-       if (p->state == TASK_WAKING)
-               flags |= ENQUEUE_MIGRATE;
 
        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
                cfs_rq = cfs_rq_of(se);
-               enqueue_entity(cfs_rq, se, flags);
-               flags = ENQUEUE_WAKEUP;
+               enqueue_entity(cfs_rq, se, wakeup);
+               wakeup = 1;
        }
 
        hrtick_update(rq);
@@ -1124,14 +1098,6 @@ static void yield_task_fair(struct rq *rq)
 
 #ifdef CONFIG_SMP
 
-static void task_waking_fair(struct rq *rq, struct task_struct *p)
-{
-       struct sched_entity *se = &p->se;
-       struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
-       se->vruntime -= cfs_rq->min_vruntime;
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group
@@ -1250,7 +1216,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         * effect of the currently running task from the load
         * of the current CPU:
         */
-       rcu_read_lock();
        if (sync) {
                tg = task_group(current);
                weight = current->se.load.weight;
@@ -1276,7 +1241,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
        balanced = !this_load ||
                100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
                imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
-       rcu_read_unlock();
 
        /*
         * If the currently running task will sleep within
@@ -1383,56 +1347,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
        return idlest;
 }
 
-/*
- * Try and locate an idle CPU in the sched_domain.
- */
-static int select_idle_sibling(struct task_struct *p, int target)
-{
-       int cpu = smp_processor_id();
-       int prev_cpu = task_cpu(p);
-       struct sched_domain *sd;
-       int i;
-
-       /*
-        * If the task is going to be woken-up on this cpu and if it is
-        * already idle, then it is the right target.
-        */
-       if (target == cpu && idle_cpu(cpu))
-               return cpu;
-
-       /*
-        * If the task is going to be woken-up on the cpu where it previously
-        * ran and if it is currently idle, then it the right target.
-        */
-       if (target == prev_cpu && idle_cpu(prev_cpu))
-               return prev_cpu;
-
-       /*
-        * Otherwise, iterate the domains and find an elegible idle cpu.
-        */
-       for_each_domain(target, sd) {
-               if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
-                       break;
-
-               for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
-                       if (idle_cpu(i)) {
-                               target = i;
-                               break;
-                       }
-               }
-
-               /*
-                * Lets stop looking for an idle sibling when we reached
-                * the domain that spans the current cpu and prev_cpu.
-                */
-               if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-                   cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-                       break;
-       }
-
-       return target;
-}
-
 /*
  * sched_balance_self: balance the current task (running on cpu) in domains
  * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
@@ -1444,8 +1358,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
  *
  * preempt must be disabled.
  */
-static int
-select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
+static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 {
        struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
        int cpu = smp_processor_id();
@@ -1462,6 +1375,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                new_cpu = prev_cpu;
        }
 
+       rcu_read_lock();
        for_each_domain(cpu, tmp) {
                if (!(tmp->flags & SD_LOAD_BALANCE))
                        continue;
@@ -1490,14 +1404,38 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                                want_sd = 0;
                }
 
-               /*
-                * If both cpu and prev_cpu are part of this domain,
-                * cpu is a valid SD_WAKE_AFFINE target.
-                */
-               if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
-                   cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
-                       affine_sd = tmp;
-                       want_affine = 0;
+               if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
+                       int candidate = -1, i;
+
+                       if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
+                               candidate = cpu;
+
+                       /*
+                        * Check for an idle shared cache.
+                        */
+                       if (tmp->flags & SD_PREFER_SIBLING) {
+                               if (candidate == cpu) {
+                                       if (!cpu_rq(prev_cpu)->cfs.nr_running)
+                                               candidate = prev_cpu;
+                               }
+
+                               if (candidate == -1 || candidate == cpu) {
+                                       for_each_cpu(i, sched_domain_span(tmp)) {
+                                               if (!cpumask_test_cpu(i, &p->cpus_allowed))
+                                                       continue;
+                                               if (!cpu_rq(i)->cfs.nr_running) {
+                                                       candidate = i;
+                                                       break;
+                                               }
+                                       }
+                               }
+                       }
+
+                       if (candidate >= 0) {
+                               affine_sd = tmp;
+                               want_affine = 0;
+                               cpu = candidate;
+                       }
                }
 
                if (!want_sd && !want_affine)
@@ -1510,28 +1448,23 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                        sd = tmp;
        }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
        if (sched_feat(LB_SHARES_UPDATE)) {
                /*
                 * Pick the largest domain to update shares over
                 */
                tmp = sd;
-               if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
+               if (affine_sd && (!tmp ||
+                                 cpumask_weight(sched_domain_span(affine_sd)) >
+                                 cpumask_weight(sched_domain_span(sd))))
                        tmp = affine_sd;
 
-               if (tmp) {
-                       spin_unlock(&rq->lock);
+               if (tmp)
                        update_shares(tmp);
-                       spin_lock(&rq->lock);
-               }
        }
-#endif
 
-       if (affine_sd) {
-               if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
-                       return select_idle_sibling(p, cpu);
-               else
-                       return select_idle_sibling(p, prev_cpu);
+       if (affine_sd && wake_affine(affine_sd, p, sync)) {
+               new_cpu = cpu;
+               goto out;
        }
 
        while (sd) {
@@ -1562,10 +1495,10 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 
                /* Now try balancing at a lower domain level of new_cpu */
                cpu = new_cpu;
-               weight = sd->span_weight;
+               weight = cpumask_weight(sched_domain_span(sd));
                sd = NULL;
                for_each_domain(cpu, tmp) {
-                       if (weight <= tmp->span_weight)
+                       if (weight <= cpumask_weight(sched_domain_span(tmp)))
                                break;
                        if (tmp->flags & sd_flag)
                                sd = tmp;
@@ -1573,6 +1506,8 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                /* while loop will break here if sd == NULL */
        }
 
+out:
+       rcu_read_unlock();
        return new_cpu;
 }
 #endif /* CONFIG_SMP */
@@ -1976,32 +1911,28 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 }
 
 /*
- * called on fork with the child task as argument from the parent's context
- *  - child not yet on the tasklist
- *  - preemption disabled
+ * Share the fairness runtime between parent and child, thus the
+ * total amount of pressure for CPU stays equal - new tasks
+ * get a chance to run but frequent forkers are not allowed to
+ * monopolize the CPU. Note: the parent runqueue is locked,
+ * the child is not running yet.
  */
-static void task_fork_fair(struct task_struct *p)
+static void task_new_fair(struct rq *rq, struct task_struct *p)
 {
-       struct cfs_rq *cfs_rq = task_cfs_rq(current);
+       struct cfs_rq *cfs_rq = task_cfs_rq(p);
        struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
        int this_cpu = smp_processor_id();
-       struct rq *rq = this_rq();
-       unsigned long flags;
-
-       spin_lock_irqsave(&rq->lock, flags);
-
-       update_rq_clock(rq);
 
-       if (unlikely(task_cpu(p) != this_cpu))
-               __set_task_cpu(p, this_cpu);
+       sched_info_queued(p);
 
        update_curr(cfs_rq);
-
        if (curr)
                se->vruntime = curr->vruntime;
        place_entity(cfs_rq, se, 1);
 
-       if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
+       /* 'curr' will be NULL if the child belongs to a different group */
+       if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
+                       curr && entity_before(curr, se)) {
                /*
                 * Upon rescheduling, sched_class::put_prev_task() will place
                 * 'current' within the tree based on its new key value.
@@ -2010,9 +1941,7 @@ static void task_fork_fair(struct task_struct *p)
                resched_task(rq->curr);
        }
 
-       se->vruntime -= cfs_rq->min_vruntime;
-
-       spin_unlock_irqrestore(&rq->lock, flags);
+       enqueue_task_fair(rq, p, 0);
 }
 
 /*
@@ -2065,27 +1994,30 @@ static void set_curr_task_fair(struct rq *rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p, int on_rq)
+static void moved_group_fair(struct task_struct *p)
 {
        struct cfs_rq *cfs_rq = task_cfs_rq(p);
 
        update_curr(cfs_rq);
-       if (!on_rq)
-               place_entity(cfs_rq, &p->se, 1);
+       place_entity(cfs_rq, &p->se, 1);
 }
 #endif
 
-unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
+unsigned int get_rr_interval_fair(struct task_struct *task)
 {
        struct sched_entity *se = &task->se;
+       unsigned long flags;
+       struct rq *rq;
        unsigned int rr_interval = 0;
 
        /*
         * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
         * idle runqueue:
         */
+       rq = task_rq_lock(task, &flags);
        if (rq->cfs.load.weight)
                rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+       task_rq_unlock(rq, &flags);
 
        return rr_interval;
 }
@@ -2111,13 +2043,11 @@ static const struct sched_class fair_sched_class = {
        .move_one_task          = move_one_task_fair,
        .rq_online              = rq_online_fair,
        .rq_offline             = rq_offline_fair,
-
-       .task_waking            = task_waking_fair,
 #endif
 
        .set_curr_task          = set_curr_task_fair,
        .task_tick              = task_tick_fair,
-       .task_fork              = task_fork_fair,
+       .task_new               = task_new_fair,
 
        .prio_changed           = prio_changed_fair,
        .switched_to            = switched_to_fair,
index 93ad2e7953cf354b32eea2244b9a352393c432a7..b133a28fcde32cbf20421f077686c14319871f74 100644 (file)
@@ -6,8 +6,7 @@
  */
 
 #ifdef CONFIG_SMP
-static int
-select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
 {
        return task_cpu(p); /* IDLE tasks as never migrated */
 }
@@ -98,7 +97,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
                check_preempt_curr(rq, p, 0);
 }
 
-unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
+unsigned int get_rr_interval_idle(struct task_struct *task)
 {
        return 0;
 }
index af24fab76a9e2b51b3fff6e64da1bfd6b299a72a..a4d790cddb1983196ba9881936d8abf7a8870dbe 100644 (file)
@@ -194,7 +194,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
        return rt_se->my_q;
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
@@ -204,7 +204,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
-                       enqueue_rt_entity(rt_se, false);
+                       enqueue_rt_entity(rt_se);
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
@@ -803,7 +803,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        dec_rt_group(rt_se, rt_rq);
 }
 
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
@@ -819,10 +819,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;
 
-       if (head)
-               list_add(&rt_se->run_list, queue);
-       else
-               list_add_tail(&rt_se->run_list, queue);
+       list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);
 
        inc_rt_tasks(rt_se, rt_rq);
@@ -859,11 +856,11 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
        }
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
-               __enqueue_rt_entity(rt_se, head);
+               __enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -874,22 +871,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
                struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
                if (rt_rq && rt_rq->rt_nr_running)
-                       __enqueue_rt_entity(rt_se, false);
+                       __enqueue_rt_entity(rt_se);
        }
 }
 
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void
-enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 {
        struct sched_rt_entity *rt_se = &p->rt;
 
        if (wakeup)
                rt_se->timeout = 0;
 
-       enqueue_rt_entity(rt_se, head);
+       enqueue_rt_entity(rt_se);
 
        if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
@@ -942,9 +938,10 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+       struct rq *rq = task_rq(p);
+
        if (sd_flag != SD_BALANCE_WAKE)
                return smp_processor_id();
 
@@ -1488,7 +1485,7 @@ static void post_schedule_rt(struct rq *rq)
  * If we are not running and we are not going to reschedule soon, we should
  * try to push tasks away now
  */
-static void task_woken_rt(struct rq *rq, struct task_struct *p)
+static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
@@ -1737,7 +1734,7 @@ static void set_curr_task_rt(struct rq *rq)
        dequeue_pushable_task(rq, p);
 }
 
-unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
+unsigned int get_rr_interval_rt(struct task_struct *task)
 {
        /*
         * Time slice is 0 for SCHED_FIFO tasks
@@ -1769,7 +1766,7 @@ static const struct sched_class rt_sched_class = {
        .rq_offline             = rq_offline_rt,
        .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
-       .task_woken             = task_woken_rt,
+       .task_wake_up           = task_wake_up_rt,
        .switched_from          = switched_from_rt,
 #endif
 
index 423655af186bfc341ac566701c8fbb400a972146..4d0658dda0e5a0ad05d27281fe22e2f718b7ee0e 100644 (file)
@@ -591,7 +591,7 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
 {
-       const struct cred *cred, *tcred;
+       const struct cred *cred = current_cred(), *tcred;
        struct pid *sid;
        int error;
 
@@ -605,10 +605,8 @@ static int check_kill_permission(int sig, struct siginfo *info,
        if (error)
                return error;
 
-       cred = current_cred();
        tcred = __task_cred(t);
-       if (!same_thread_group(current, t) &&
-           (cred->euid ^ tcred->suid) &&
+       if ((cred->euid ^ tcred->suid) &&
            (cred->euid ^ tcred->uid) &&
            (cred->uid  ^ tcred->suid) &&
            (cred->uid  ^ tcred->uid) &&
index 3514c4449604bda6333ff158d52652a6086c7025..00889bd3c5903812ed89d722135b15330b513dbb 100644 (file)
@@ -640,7 +640,7 @@ int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
                        goto cancelled;
 
                /* the timer holds a reference whilst it is pending */
-               ret = slow_work_get_ref(work);
+               ret = work->ops->get_ref(work);
                if (ret < 0)
                        goto cant_get_ref;
 
index d2080ad196fd0ffc950f63f6de4ae03cc189f780..81324d12eb35a5db7fae8a0ae3d18c76ca38ce67 100644 (file)
@@ -140,11 +140,11 @@ void softlockup_tick(void)
         * Wake up the high-prio watchdog task twice per
         * threshold timespan.
         */
-       if (time_after(now - softlockup_thresh/2, touch_timestamp))
+       if (now > touch_timestamp + softlockup_thresh/2)
                wake_up_process(per_cpu(watchdog_task, this_cpu));
 
        /* Warn about unreasonable delays: */
-       if (time_before_eq(now - softlockup_thresh, touch_timestamp))
+       if (now <= (touch_timestamp + softlockup_thresh))
                return;
 
        per_cpu(print_timestamp, this_cpu) = touch_timestamp;
index 75371c281fc35db0c077606bb3b5d884b37c0cc4..3bfc93832e5a37400b655ab16223aaa087d4ccde 100644 (file)
@@ -942,15 +942,16 @@ change_okay:
 
 void do_sys_times(struct tms *tms)
 {
-       cputime_t tgutime, tgstime, cutime, cstime;
+       struct task_cputime cputime;
+       cputime_t cutime, cstime;
 
+       thread_group_cputime(current, &cputime);
        spin_lock_irq(&current->sighand->siglock);
-       thread_group_times(current, &tgutime, &tgstime);
        cutime = current->signal->cutime;
        cstime = current->signal->cstime;
        spin_unlock_irq(&current->sighand->siglock);
-       tms->tms_utime = cputime_to_clock_t(tgutime);
-       tms->tms_stime = cputime_to_clock_t(tgstime);
+       tms->tms_utime = cputime_to_clock_t(cputime.utime);
+       tms->tms_stime = cputime_to_clock_t(cputime.stime);
        tms->tms_cutime = cputime_to_clock_t(cutime);
        tms->tms_cstime = cputime_to_clock_t(cstime);
 }
@@ -993,7 +994,6 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;
-       rcu_read_lock();
 
        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
@@ -1047,7 +1047,6 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
-       rcu_read_unlock();
        return err;
 }
 
@@ -1370,7 +1369,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 {
        struct task_struct *t;
        unsigned long flags;
-       cputime_t tgutime, tgstime, utime, stime;
+       cputime_t utime, stime;
+       struct task_cputime cputime;
        unsigned long maxrss = 0;
 
        memset((char *) r, 0, sizeof *r);
@@ -1404,9 +1404,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
                                break;
 
                case RUSAGE_SELF:
-                       thread_group_times(p, &tgutime, &tgstime);
-                       utime = cputime_add(utime, tgutime);
-                       stime = cputime_add(stime, tgstime);
+                       thread_group_cputime(p, &cputime);
+                       utime = cputime_add(utime, cputime.utime);
+                       stime = cputime_add(stime, cputime.stime);
                        r->ru_nvcsw += p->signal->nvcsw;
                        r->ru_nivcsw += p->signal->nivcsw;
                        r->ru_minflt += p->signal->min_flt;
index f8b0f96110be2a20f94ba0f58c69dacbf4ef60db..ecc7adbf0918626982201f0710b0abbccc452abf 100644 (file)
@@ -515,10 +515,6 @@ static inline void clocksource_select(void) { }
  */
 static int __init clocksource_done_booting(void)
 {
-       mutex_lock(&clocksource_mutex);
-       curr_clocksource = clocksource_default_clock();
-       mutex_unlock(&clocksource_mutex);
-
        finished_booting = 1;
 
        /*
index b63cfebc680b384b69a0263b3afd4c65dcd20f1e..44320b1e2a6c6704ca4cde51a5882cdfdb1d23e1 100644 (file)
@@ -134,13 +134,18 @@ __setup("nohz=", setup_tick_nohz);
  * value. We do this unconditionally on any cpu, as we don't know whether the
  * cpu, which has the update task assigned is in a long sleep.
  */
-static void tick_nohz_update_jiffies(ktime_t now)
+static void tick_nohz_update_jiffies(void)
 {
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        unsigned long flags;
+       ktime_t now;
+
+       if (!ts->tick_stopped)
+               return;
 
        cpumask_clear_cpu(cpu, nohz_cpu_mask);
+       now = ktime_get();
        ts->idle_waketime = now;
 
        local_irq_save(flags);
@@ -150,17 +155,20 @@ static void tick_nohz_update_jiffies(ktime_t now)
        touch_softlockup_watchdog();
 }
 
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(int cpu)
 {
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-       ktime_t delta;
 
-       delta = ktime_sub(now, ts->idle_entrytime);
-       ts->idle_lastupdate = now;
-       ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-       ts->idle_active = 0;
+       if (ts->idle_active) {
+               ktime_t now, delta;
+               now = ktime_get();
+               delta = ktime_sub(now, ts->idle_entrytime);
+               ts->idle_lastupdate = now;
+               ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+               ts->idle_active = 0;
 
-       sched_clock_idle_wakeup_event(0);
+               sched_clock_idle_wakeup_event(0);
+       }
 }
 
 static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
@@ -281,15 +289,12 @@ void tick_nohz_stop_sched_tick(int inidle)
                        time_delta = KTIME_MAX;
        } while (read_seqretry(&xtime_lock, seq));
 
-       if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
-           arch_needs_cpu(cpu)) {
-               next_jiffies = last_jiffies + 1;
+       /* Get the next timer wheel timer */
+       next_jiffies = get_next_timer_interrupt(last_jiffies);
+       delta_jiffies = next_jiffies - last_jiffies;
+
+       if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
                delta_jiffies = 1;
-       } else {
-               /* Get the next timer wheel timer */
-               next_jiffies = get_next_timer_interrupt(last_jiffies);
-               delta_jiffies = next_jiffies - last_jiffies;
-       }
        /*
         * Do not stop the tick, if we are only one off
         * or if the cpu is required for rcu
@@ -455,11 +460,7 @@ void tick_nohz_restart_sched_tick(void)
        ktime_t now;
 
        local_irq_disable();
-       if (ts->idle_active || (ts->inidle && ts->tick_stopped))
-               now = ktime_get();
-
-       if (ts->idle_active)
-               tick_nohz_stop_idle(cpu, now);
+       tick_nohz_stop_idle(cpu);
 
        if (!ts->inidle || !ts->tick_stopped) {
                ts->inidle = 0;
@@ -473,6 +474,7 @@ void tick_nohz_restart_sched_tick(void)
 
        /* Update jiffies first */
        select_nohz_load_balancer(0);
+       now = ktime_get();
        tick_do_update_jiffies64(now);
        cpumask_clear_cpu(cpu, nohz_cpu_mask);
 
@@ -606,18 +608,22 @@ static void tick_nohz_switch_to_nohz(void)
  * timer and do not touch the other magic bits which need to be done
  * when idle is left.
  */
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(int cpu)
 {
 #if 0
        /* Switch back to 2.6.27 behaviour */
 
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-       ktime_t delta;
+       ktime_t delta, now;
+
+       if (!ts->tick_stopped)
+               return;
 
        /*
         * Do not touch the tick device, when the next expiry is either
         * already reached or less/equal than the tick period.
         */
+       now = ktime_get();
        delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
        if (delta.tv64 <= tick_period.tv64)
                return;
@@ -626,26 +632,9 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz(int cpu)
-{
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-       ktime_t now;
-
-       if (!ts->idle_active && !ts->tick_stopped)
-               return;
-       now = ktime_get();
-       if (ts->idle_active)
-               tick_nohz_stop_idle(cpu, now);
-       if (ts->tick_stopped) {
-               tick_nohz_update_jiffies(now);
-               tick_nohz_kick_tick(cpu, now);
-       }
-}
-
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
 
 #endif /* NO_HZ */
 
@@ -655,7 +644,11 @@ static inline void tick_check_nohz(int cpu) { }
 void tick_check_idle(int cpu)
 {
        tick_check_oneshot_broadcast(cpu);
-       tick_check_nohz(cpu);
+#ifdef CONFIG_NO_HZ
+       tick_nohz_stop_idle(cpu);
+       tick_nohz_update_jiffies();
+       tick_nohz_kick_tick(cpu);
+#endif
 }
 
 /*
index 26e2f3705cc159e75324c28e3caacfdfea379484..8b709dee8e3415ca03fd46316dcb300e58abeaa4 100644 (file)
@@ -177,7 +177,7 @@ void timekeeping_leap_insert(int leapsecond)
 {
        xtime.tv_sec += leapsecond;
        wall_to_monotonic.tv_sec -= leapsecond;
-       update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+       update_vsyscall(&xtime, timekeeper.clock);
 }
 
 #ifdef CONFIG_GENERIC_TIME
@@ -337,7 +337,7 @@ int do_settimeofday(struct timespec *tv)
        timekeeper.ntp_error = 0;
        ntp_clear();
 
-       update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+       update_vsyscall(&xtime, timekeeper.clock);
 
        write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -822,7 +822,7 @@ void update_wall_time(void)
        update_xtime_cache(nsecs);
 
        /* check to see if there is a new clocksource to use */
-       update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+       update_vsyscall(&xtime, timekeeper.clock);
 }
 
 /**
index 54c0ddaa06c98d3b4b244657c54145183c90e28d..1b5b7aa2fdfd094fec018efbc533b65a686b1339 100644 (file)
@@ -150,9 +150,6 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
        P_ns(expires_next);
        P(hres_active);
        P(nr_events);
-       P(nr_retries);
-       P(nr_hangs);
-       P_ns(max_hang_time);
 #endif
 #undef P
 #undef P_ns
@@ -255,7 +252,7 @@ static int timer_list_show(struct seq_file *m, void *v)
        u64 now = ktime_to_ns(ktime_get());
        int cpu;
 
-       SEQ_printf(m, "Timer List Version: v0.5\n");
+       SEQ_printf(m, "Timer List Version: v0.4\n");
        SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
        SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
 
index 22cf21e9e792a2c70713427f2a224301e2313c10..6dc4e5ef7a01aab7e92479455c3672ce6096461c 100644 (file)
@@ -369,18 +369,11 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
-       int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       static DEFINE_MUTEX(mutex);
        static struct trace_seq s;
        unsigned long long avg;
 #endif
-       mutex_lock(&ftrace_profile_lock);
-
-       /* we raced with function_profile_reset() */
-       if (unlikely(rec->counter == 0)) {
-               ret = -EBUSY;
-               goto out;
-       }
 
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
@@ -390,17 +383,17 @@ static int function_stat_show(struct seq_file *m, void *v)
        avg = rec->time;
        do_div(avg, rec->counter);
 
+       mutex_lock(&mutex);
        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_print_seq(m, &s);
+       mutex_unlock(&mutex);
 #endif
        seq_putc(m, '\n');
-out:
-       mutex_unlock(&ftrace_profile_lock);
 
-       return ret;
+       return 0;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1480,8 +1473,6 @@ static void *t_start(struct seq_file *m, loff_t *pos)
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
-               /* reset in case of seek/pread */
-               iter->flags &= ~FTRACE_ITER_HASH;
                return iter;
        }
 
@@ -2402,7 +2393,7 @@ static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = no_llseek,
+       .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
 };
 
@@ -3267,7 +3258,6 @@ void ftrace_graph_init_task(struct task_struct *t)
 {
        /* Make sure we do not use the parent ret_stack */
        t->ret_stack = NULL;
-       t->curr_ret_stack = -1;
 
        if (ftrace_graph_active) {
                struct ftrace_ret_stack *ret_stack;
@@ -3277,6 +3267,7 @@ void ftrace_graph_init_task(struct task_struct *t)
                                GFP_KERNEL);
                if (!ret_stack)
                        return;
+               t->curr_ret_stack = -1;
                atomic_set(&t->tracing_graph_pause, 0);
                atomic_set(&t->trace_overrun, 0);
                t->ftrace_timestamp = 0;
index e749a054915ce14d14afae6b7f04136dfeea01cb..5dd017fea6f58bf8cbb3824770c99ba6f83fa4f2 100644 (file)
@@ -389,7 +389,7 @@ static inline int test_time_stamp(u64 delta)
 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 
 /* Max number of timestamps that can fit on a page */
-#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
+#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
 
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
@@ -2237,12 +2237,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return NULL;
 
+       if (atomic_read(&buffer->record_disabled))
+               return NULL;
+
        /* If we are tracing schedule, we don't want to recurse */
        resched = ftrace_preempt_disable();
 
-       if (atomic_read(&buffer->record_disabled))
-               goto out_nocheck;
-
        if (trace_recursive_lock())
                goto out_nocheck;
 
@@ -2474,10 +2474,10 @@ int ring_buffer_write(struct ring_buffer *buffer,
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return -EBUSY;
 
-       resched = ftrace_preempt_disable();
-
        if (atomic_read(&buffer->record_disabled))
-               goto out;
+               return -EBUSY;
+
+       resched = ftrace_preempt_disable();
 
        cpu = raw_smp_processor_id();
 
index 3cfb60b4770b651d783832a7fa82b0e839aecf8e..b20d3ec75de9c6f505e5a591d7d746013b88d6db 100644 (file)
@@ -748,10 +748,10 @@ out:
        mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct ring_buffer *buffer, int cpu)
+static void __tracing_reset(struct trace_array *tr, int cpu)
 {
        ftrace_disable_cpu();
-       ring_buffer_reset_cpu(buffer, cpu);
+       ring_buffer_reset_cpu(tr->buffer, cpu);
        ftrace_enable_cpu();
 }
 
@@ -763,7 +763,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
        /* Make sure all commits have finished */
        synchronize_sched();
-       __tracing_reset(buffer, cpu);
+       __tracing_reset(tr, cpu);
 
        ring_buffer_record_enable(buffer);
 }
@@ -781,7 +781,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
        tr->time_start = ftrace_now(tr->cpu);
 
        for_each_online_cpu(cpu)
-               __tracing_reset(buffer, cpu);
+               __tracing_reset(tr, cpu);
 
        ring_buffer_record_enable(buffer);
 }
@@ -858,8 +858,6 @@ void tracing_start(void)
                goto out;
        }
 
-       /* Prevent the buffers from switching */
-       __raw_spin_lock(&ftrace_max_lock);
 
        buffer = global_trace.buffer;
        if (buffer)
@@ -869,8 +867,6 @@ void tracing_start(void)
        if (buffer)
                ring_buffer_record_enable(buffer);
 
-       __raw_spin_unlock(&ftrace_max_lock);
-
        ftrace_start();
  out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -892,9 +888,6 @@ void tracing_stop(void)
        if (trace_stop_count++)
                goto out;
 
-       /* Prevent the buffers from switching */
-       __raw_spin_lock(&ftrace_max_lock);
-
        buffer = global_trace.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
@@ -903,8 +896,6 @@ void tracing_stop(void)
        if (buffer)
                ring_buffer_record_disable(buffer);
 
-       __raw_spin_unlock(&ftrace_max_lock);
-
  out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
@@ -1171,13 +1162,6 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;
 
-       /*
-        * NMIs can not handle page faults, even with fix ups.
-        * The save user stack can (and often does) fault.
-        */
-       if (unlikely(in_nmi()))
-               return;
-
        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                          sizeof(*entry), flags, pc);
        if (!event)
index 374d4eeea9eab4b5a45fde30d5da975bd777b06c..405cb850b75d9a308d04d198946e9b0e8da21a39 100644 (file)
@@ -746,8 +746,7 @@ extern const char *__stop___trace_bprintk_fmt[];
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print)            \
-       extern struct ftrace_event_call                                 \
-       __attribute__((__aligned__(4))) event_##call;
+       extern struct ftrace_event_call event_##call;
 #undef FTRACE_ENTRY_DUP
 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)                \
        FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
index 452f188c11e0834dcbd84fb6eb5b84b50f250a97..2e78277eff9da8caef24be177ab48ec1ecce7bc5 100644 (file)
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-        string_helpers.o gcd.o lcm.o
+        string_helpers.o gcd.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
index 41b1804fa728a6cb6d38abd66ce1e71bb05eb1b6..66eef2e4483ea50caaecd2afd1e8ca652e5e28d0 100644 (file)
@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
        ret->element_size = element_size;
        ret->total_nr_elements = total;
        if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
-               memset(&ret->parts[0], FLEX_ARRAY_FREE,
+               memset(ret->parts[0], FLEX_ARRAY_FREE,
                                                FLEX_ARRAY_BASE_BYTES_LEFT);
        return ret;
 }
index eda7ba372f1abad67aa1faf54a88af271ecf4ecb..80ca9aca038be447573b8130cbc22c1133f34867 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,12 +156,10 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
                        /* if already at the top layer, we need to grow */
-                       if (id >= 1 << (idp->layers * IDR_BITS)) {
+                       if (!(p = pa[l])) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }
-                       p = pa[l];
-                       BUG_ON(!p);
 
                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
diff --git a/lib/lcm.c b/lib/lcm.c
deleted file mode 100644 (file)
index 157cd88..0000000
--- a/lib/lcm.c
+++ /dev/null
@@ -1,15 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/gcd.h>
-#include <linux/module.h>
-
-/* Lowest common multiple */
-unsigned long lcm(unsigned long a, unsigned long b)
-{
-       if (a && b)
-               return (a * b) / gcd(a, b);
-       else if (b)
-               return b;
-
-       return a;
-}
-EXPORT_SYMBOL_GPL(lcm);
index 9d942128c152d035bfc62d9bda6632bb254be0ed..aeaa6d7344475518a3b73b3a7062106e77cadcea 100644 (file)
@@ -76,7 +76,6 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
        if (!fbc->counters)
                return -ENOMEM;
 #ifdef CONFIG_HOTPLUG_CPU
-       INIT_LIST_HEAD(&fbc->list);
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
index 1d5fa0870ca91d2692894506549e0dec22275f7a..a2b76a588e348e0d46f2bfed4131af6ac3eea86b 100644 (file)
@@ -115,8 +115,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                 */
                vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
-               bounce_copy_vec(tovec, vfrom);
                flush_dcache_page(tovec->bv_page);
+               bounce_copy_vec(tovec, vfrom);
        }
 }
 
index 8d723c9e8b75b316041ea564f5e243b79a94e2b7..e43359214f6ff15020b6f05cc07aa5e6ddcb4162 100644 (file)
@@ -77,20 +77,12 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
        switch (advice) {
        case POSIX_FADV_NORMAL:
                file->f_ra.ra_pages = bdi->ra_pages;
-               spin_lock(&file->f_lock);
-               file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_RANDOM:
-               spin_lock(&file->f_lock);
-               file->f_mode |= FMODE_RANDOM;
-               spin_unlock(&file->f_lock);
+               file->f_ra.ra_pages = 0;
                break;
        case POSIX_FADV_SEQUENTIAL:
                file->f_ra.ra_pages = bdi->ra_pages * 2;
-               spin_lock(&file->f_lock);
-               file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_WILLNEED:
                if (!mapping->a_ops->readpage) {
index 9e0826ea7bbe759dfb2356d13bdfb038cc3e0869..8e96c9076f80b5df32b5d1fb89523bf4abb544a5 100644 (file)
@@ -462,7 +462,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
        /*
         * Splice_read and readahead add shmem/tmpfs pages into the page cache
         * before shmem_readpage has a chance to mark them as SwapBacked: they
-        * need to go on the anon lru below, and mem_cgroup_cache_charge
+        * need to go on the active_anon lru below, and mem_cgroup_cache_charge
         * (called in add_to_page_cache) needs to know where they're going too.
         */
        if (mapping_cap_swap_backed(mapping))
@@ -473,7 +473,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                if (page_is_file_cache(page))
                        lru_cache_add_file(page);
                else
-                       lru_cache_add_anon(page);
+                       lru_cache_add_active_anon(page);
        }
        return ret;
 }
@@ -1030,9 +1030,6 @@ find_page:
                                goto page_not_up_to_date;
                        if (!trylock_page(page))
                                goto page_not_up_to_date;
-                       /* Did it get truncated before we got the lock? */
-                       if (!page->mapping)
-                               goto page_not_up_to_date_locked;
                        if (!mapping->a_ops->is_partially_uptodate(page,
                                                                desc, offset))
                                goto page_not_up_to_date_locked;
@@ -1123,12 +1120,6 @@ page_not_up_to_date_locked:
                }
 
 readpage:
-               /*
-                * A previous I/O error may have been due to temporary
-                * failures, eg. multipath errors.
-                * PG_error will be set again if readpage fails.
-                */
-               ClearPageError(page);
                /* Start the actual read. The read will unlock the page. */
                error = mapping->a_ops->readpage(filp, page);
 
index f5a106eeb2bd532c258c0b0d83c422de17a7b924..5d7601b0287487321314c1d969f17425b339f9b1 100644 (file)
@@ -401,7 +401,7 @@ static void clear_huge_page(struct page *page,
 {
        int i;
 
-       if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
+       if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
                clear_gigantic_page(page, addr, sz);
                return;
        }
@@ -545,7 +545,6 @@ static void free_huge_page(struct page *page)
 
        mapping = (struct address_space *) page_private(page);
        set_page_private(page, 0);
-       page->mapping = NULL;
        BUG_ON(page_count(page));
        INIT_LIST_HEAD(&page->lru);
 
@@ -1008,7 +1007,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                page = alloc_buddy_huge_page(h, vma, addr);
                if (!page) {
                        hugetlb_put_quota(inode->i_mapping, chg);
-                       return ERR_PTR(-VM_FAULT_SIGBUS);
+                       return ERR_PTR(-VM_FAULT_OOM);
                }
        }
 
@@ -2096,10 +2095,8 @@ retry:
                        spin_lock(&inode->i_lock);
                        inode->i_blocks += blocks_per_huge_page(h);
                        spin_unlock(&inode->i_lock);
-               } else {
+               } else
                        lock_page(page);
-                       page->mapping = HUGETLB_POISON;
-               }
        }
 
        /*
index f03e8e2c2270685936ab1c72ddd269a66fe68c65..17bc0df273bb5c84937f8973c0489aedbf0279a5 100644 (file)
@@ -59,7 +59,7 @@ extern void prep_compound_page(struct page *page, unsigned long order);
  */
 static inline unsigned long page_order(struct page *page)
 {
-       /* PageBuddy() must be checked by the caller */
+       VM_BUG_ON(!PageBuddy(page));
        return page_private(page);
 }
 
index ba9a0aaa4b74c1cc4bcb96670c39fa7c57110c05..66035bf926120cff9c33b0fdcf8815dde0da2961 100644 (file)
@@ -2008,12 +2008,12 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
        }
        unlock_page_cgroup(pc);
 
-       *ptr = mem;
        if (mem) {
-               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false,
+               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
                                                page);
                css_put(&mem->css);
        }
+       *ptr = mem;
        return ret;
 }
 
index 8aeba5341ccdc833d80547901ada58891453bfa9..dacc64183874c739e6a501c780d28ee220c4ac2b 100644 (file)
@@ -589,6 +589,7 @@ static struct page_state {
 
        { lru|dirty,    lru|dirty,      "LRU",          me_pagecache_dirty },
        { lru|dirty,    lru,            "clean LRU",    me_pagecache_clean },
+       { swapbacked,   swapbacked,     "anonymous",    me_pagecache_clean },
 
        /*
         * Catchall entry: must be at end.
@@ -637,7 +638,7 @@ static int page_action(struct page_state *ps, struct page *p,
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
  */
-static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
                                  int trapno)
 {
        enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
@@ -647,18 +648,15 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        int i;
        int kill = 1;
 
-       if (PageReserved(p) || PageSlab(p))
-               return SWAP_SUCCESS;
+       if (PageReserved(p) || PageCompound(p) || PageSlab(p) || PageKsm(p))
+               return;
 
        /*
         * This check implies we don't kill processes if their pages
         * are in the swap cache early. Those are always late kills.
         */
        if (!page_mapped(p))
-               return SWAP_SUCCESS;
-
-       if (PageCompound(p) || PageKsm(p))
-               return SWAP_FAIL;
+               return;
 
        if (PageSwapCache(p)) {
                printk(KERN_ERR
@@ -720,8 +718,6 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         */
        kill_procs_ao(&tokill, !!PageDirty(p), trapno,
                      ret != SWAP_SUCCESS, pfn);
-
-       return ret;
 }
 
 int __memory_failure(unsigned long pfn, int trapno, int ref)
@@ -791,13 +787,8 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
 
        /*
         * Now take care of user space mappings.
-        * Abort on fail: __remove_from_page_cache() assumes unmapped page.
         */
-       if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
-               printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
-               res = -EBUSY;
-               goto out;
-       }
+       hwpoison_user_mappings(p, pfn, trapno);
 
        /*
         * Torn down by someone else?
index 53c1da0d04a68d02bd1fd9eba8c925ceda59fec8..4e5945588c73f298961256c9361869d1cf9df9e2 100644 (file)
@@ -1282,20 +1282,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                return i ? : -EFAULT;
                        }
                        if (pages) {
-                               struct page *page;
-
-                               page = vm_normal_page(gate_vma, start, *pte);
-                               if (!page) {
-                                       if (!(gup_flags & FOLL_DUMP) &&
-                                            is_zero_pfn(pte_pfn(*pte)))
-                                               page = pte_page(*pte);
-                                       else {
-                                               pte_unmap(pte);
-                                               return i ? : -EFAULT;
-                                       }
-                               }
+                               struct page *page = vm_normal_page(gate_vma, start, *pte);
                                pages[i] = page;
-                               get_page(page);
+                               if (page)
+                                       get_page(page);
                        }
                        pte_unmap(pte);
                        if (vmas)
@@ -2629,40 +2619,6 @@ out_release:
        return ret;
 }
 
-/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-       address &= PAGE_MASK;
-       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-               struct vm_area_struct *prev = vma->vm_prev;
-
-               /*
-                * Is there a mapping abutting this one below?
-                *
-                * That's only ok if it's the same stack mapping
-                * that has gotten split..
-                */
-               if (prev && prev->vm_end == address)
-                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-               expand_stack(vma, address - PAGE_SIZE);
-       }
-       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-               struct vm_area_struct *next = vma->vm_next;
-
-               /* As VM_GROWSDOWN but s/below/above/ */
-               if (next && next->vm_start == address + PAGE_SIZE)
-                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-               expand_upwards(vma, address + PAGE_SIZE);
-       }
-       return 0;
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -2676,23 +2632,19 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        pte_t entry;
 
-       pte_unmap(page_table);
-
-       /* Check if we need to add a guard page to the stack */
-       if (check_stack_guard_page(vma, address) < 0)
-               return VM_FAULT_SIGBUS;
-
-       /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
                                                vma->vm_page_prot));
-               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+               ptl = pte_lockptr(mm, pmd);
+               spin_lock(ptl);
                if (!pte_none(*page_table))
                        goto unlock;
                goto setpte;
        }
 
        /* Allocate our own private page. */
+       pte_unmap(page_table);
+
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
        page = alloc_zeroed_user_highpage_movable(vma, address);
index f4be4649d5954140a2f41f191cf5974820897871..2047465cd27cf5b1829979057459857184a42b2e 100644 (file)
@@ -551,19 +551,19 @@ static inline int pageblock_free(struct page *page)
 /* Return the start of the next active pageblock after a given page */
 static struct page *next_active_pageblock(struct page *page)
 {
+       int pageblocks_stride;
+
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
 
+       /* Move forward by at least 1 * pageblock_nr_pages */
+       pageblocks_stride = 1;
+
        /* If the entire pageblock is free, move to the end of free page */
-       if (pageblock_free(page)) {
-               int order;
-               /* be careful. we don't have locks, page_order can be changed.*/
-               order = page_order(page);
-               if ((order < MAX_ORDER) && (order >= pageblock_order))
-                       return page + (1 << order);
-       }
+       if (pageblock_free(page))
+               pageblocks_stride += page_order(page) - pageblock_order;
 
-       return page + pageblock_nr_pages;
+       return page + (pageblocks_stride * pageblock_nr_pages);
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
@@ -626,7 +626,7 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
  * Scanning pfn is much easier than scanning lru list.
  * Scan pfn from start to end and Find LRU page.
  */
-unsigned long scan_lru_pages(unsigned long start, unsigned long end)
+int scan_lru_pages(unsigned long start, unsigned long end)
 {
        unsigned long pfn;
        struct page *page;
index 3c6e3e29255caa2e89d446f21892625e0324212e..4545d59442431e33d7d64e2f98c8f9d1a403780b 100644 (file)
@@ -1482,7 +1482,7 @@ unsigned slab_node(struct mempolicy *policy)
                (void)first_zones_zonelist(zonelist, highest_zoneidx,
                                                        &policy->v.nodes,
                                                        &zone);
-               return zone ? zone->node : numa_node_id();
+               return zone->node;
        }
 
        default:
@@ -2122,8 +2122,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                        char *rest = nodelist;
                        while (isdigit(*rest))
                                rest++;
-                       if (*rest)
-                               goto out;
+                       if (!*rest)
+                               err = 0;
                }
                break;
        case MPOL_INTERLEAVE:
@@ -2132,6 +2132,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                 */
                if (!nodelist)
                        nodes = node_states[N_HIGH_MEMORY];
+               err = 0;
                break;
        case MPOL_LOCAL:
                /*
@@ -2141,19 +2142,11 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                        goto out;
                mode = MPOL_PREFERRED;
                break;
-       case MPOL_DEFAULT:
-               /*
-                * Insist on a empty nodelist
-                */
-               if (!nodelist)
-                       err = 0;
-               goto out;
-       case MPOL_BIND:
-               /*
-                * Insist on a nodelist
-                */
-               if (!nodelist)
-                       goto out;
+
+       /*
+        * case MPOL_BIND:    mpol_new() enforces non-empty nodemask.
+        * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
+        */
        }
 
        mode_flags = 0;
@@ -2167,14 +2160,13 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                else if (!strcmp(flags, "relative"))
                        mode_flags |= MPOL_F_RELATIVE_NODES;
                else
-                       goto out;
+                       err = 1;
        }
 
        new = mpol_new(mode, mode_flags, &nodes);
        if (IS_ERR(new))
-               goto out;
-
-       {
+               err = 1;
+       else {
                int ret;
                NODEMASK_SCRATCH(scratch);
                if (scratch) {
@@ -2185,15 +2177,13 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
                        ret = -ENOMEM;
                NODEMASK_SCRATCH_FREE(scratch);
                if (ret) {
+                       err = 1;
                        mpol_put(new);
-                       goto out;
+               } else if (no_context) {
+                       /* save for contextualization */
+                       new->w.user_nodemask = nodes;
                }
        }
-       err = 0;
-       if (no_context) {
-               /* save for contextualization */
-               new->w.user_nodemask = nodes;
-       }
 
 out:
        /* Restore string for error message */
index 2d846cfe3990daff87cba2361e05debcd9daba06..2e05c97b04f5bb7ec799a170b76bab6d4dc48918 100644 (file)
@@ -138,13 +138,6 @@ void munlock_vma_page(struct page *page)
        }
 }
 
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
-{
-       return (vma->vm_flags & VM_GROWSDOWN) &&
-               (vma->vm_start == addr) &&
-               !vma_stack_continue(vma->vm_prev, addr);
-}
-
 /**
  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
  * @vma:   target vma
@@ -177,12 +170,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        if (vma->vm_flags & VM_WRITE)
                gup_flags |= FOLL_WRITE;
 
-       /* We don't try to access the guard page of a stack vma */
-       if (stack_guard_page(vma, start)) {
-               addr += PAGE_SIZE;
-               nr_pages--;
-       }
-
        while (nr_pages > 0) {
                int i;
 
index 866a666690372a38032e3e49263f834a95867b69..ae197468b352bbdcecfaeb04cd0ebce73382aaef 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -389,23 +389,17 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
-       struct vm_area_struct *next;
-
-       vma->vm_prev = prev;
        if (prev) {
-               next = prev->vm_next;
+               vma->vm_next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
-                       next = rb_entry(rb_parent,
+                       vma->vm_next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
-                       next = NULL;
+                       vma->vm_next = NULL;
        }
-       vma->vm_next = next;
-       if (next)
-               next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -493,11 +487,7 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
 {
-       struct vm_area_struct *next = vma->vm_next;
-
-       prev->vm_next = next;
-       if (next)
-               next->vm_prev = prev;
+       prev->vm_next = vma->vm_next;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
@@ -1600,6 +1590,9 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
+#ifndef CONFIG_IA64
+static
+#endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        int error;
@@ -1805,7 +1798,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
-       vma->vm_prev = NULL;
        do {
                rb_erase(&vma->vm_rb, &mm->mm_rb);
                mm->map_count--;
@@ -1813,8 +1805,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
-       if (vma)
-               vma->vm_prev = prev;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;
index e35bfb82c8555b7377334dbea42bfcf588b0bab8..f5b7d1760213e53db3c46e84dde56daf219ea0cd 100644 (file)
@@ -87,24 +87,3 @@ int memmap_valid_within(unsigned long pfn,
        return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-
-#ifdef CONFIG_SMP
-/* Called when a more accurate view of NR_FREE_PAGES is needed */
-unsigned long zone_nr_free_pages(struct zone *zone)
-{
-       unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
-
-       /*
-        * While kswapd is awake, it is considered the zone is under some
-        * memory pressure. Under pressure, there is a risk that
-        * per-cpu-counter-drift will allow the min watermark to be breached
-        * potentially causing a live-lock. While kswapd is awake and
-        * free pages are low, get a better estimate for free pages
-        */
-       if (nr_free_pages < zone->percpu_drift_mark &&
-                       !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
-               return zone_page_state_snapshot(zone, NR_FREE_PAGES);
-
-       return nr_free_pages;
-}
-#endif /* CONFIG_SMP */
index 1737c7e7af536c587c23c0f00f13b602203ac6c8..8bc969d8112d28adb32b3754d339820739e2ba8c 100644 (file)
@@ -212,7 +212,6 @@ success:
        mmu_notifier_invalidate_range_end(mm, start, end);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
-       perf_event_mmap(vma);
        return 0;
 
 fail:
@@ -301,6 +300,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
+               perf_event_mmap(vma);
                nstart = tmp;
 
                if (nstart < prev->vm_end)
index 406e8d47ce49f76180667647cffc717b029ff950..9876fa0c3ad30e75d842f965ac4417e66be6f07a 100644 (file)
@@ -608,7 +608,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct *pvma, **pp, *next;
+       struct vm_area_struct *pvma, **pp;
        struct address_space *mapping;
        struct rb_node **p, *parent;
 
@@ -668,11 +668,8 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
                        break;
        }
 
-       next = *pp;
+       vma->vm_next = *pp;
        *pp = vma;
-       vma->vm_next = next;
-       if (next)
-               next->vm_prev = vma;
 }
 
 /*
@@ -1615,7 +1612,6 @@ void exit_mmap(struct mm_struct *mm)
                mm->mmap = vma->vm_next;
                delete_vma_from_mm(vma);
                delete_vma(mm, vma);
-               cond_resched();
        }
 
        kleave("");
index 83cd9bb55092afd3bd18bdc2e1cce7888aa8c7a2..9092b43f07fa01cda075b005fb66cd482eb90ecf 100644 (file)
@@ -426,8 +426,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == p->mm)
                        continue;
-               if (mem && !task_in_mem_cgroup(c, mem))
-                       continue;
                if (!oom_kill_task(c))
                        return 0;
        }
index 35f7fd09fa7245ac46601644ad4d3e1ef868df65..7d4406f07d97022c652b9ece0cf5090dc820af3e 100644 (file)
@@ -532,13 +532,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
-       int to_free = count;
 
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
 
-       while (to_free) {
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+       while (count) {
                struct page *page;
                struct list_head *list;
 
@@ -563,9 +563,8 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
-               } while (--to_free && --batch_free && !list_empty(list));
+               } while (--count && --batch_free && !list_empty(list));
        }
-       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
 }
 
@@ -576,8 +575,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
 
-       __free_one_page(page, zone, order, migratetype);
        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+       __free_one_page(page, zone, order, migratetype);
        spin_unlock(&zone->lock);
 }
 
@@ -1367,7 +1366,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
        /* free_pages my go negative - that's OK */
        long min = mark;
-       long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
+       long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
@@ -1683,7 +1682,6 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        struct reclaim_state reclaim_state;
        struct task_struct *p = current;
-       bool drained = false;
 
        cond_resched();
 
@@ -1702,25 +1700,14 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        cond_resched();
 
-       if (unlikely(!(*did_some_progress)))
-               return NULL;
+       if (order != 0)
+               drain_all_pages();
 
-retry:
-       page = get_page_from_freelist(gfp_mask, nodemask, order,
+       if (likely(*did_some_progress))
+               page = get_page_from_freelist(gfp_mask, nodemask, order,
                                        zonelist, high_zoneidx,
                                        alloc_flags, preferred_zone,
                                        migratetype);
-
-       /*
-        * If an allocation failed after direct reclaim, it could be because
-        * pages are pinned on the per-cpu lists. Drain them and try again
-        */
-       if (!page && !drained) {
-               drain_all_pages();
-               drained = true;
-               goto retry;
-       }
-
        return page;
 }
 
@@ -2252,7 +2239,7 @@ void show_free_areas(void)
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
-                       K(zone_nr_free_pages(zone)),
+                       K(zone_page_state(zone, NR_FREE_PAGES)),
                        K(min_wmark_pages(zone)),
                        K(low_wmark_pages(zone)),
                        K(high_wmark_pages(zone)),
index 3bfd6e251f1948b6b748a14a5d0917476c76fc68..5adfc268b408936a3d18ae9014ce70e49590738d 100644 (file)
@@ -1702,9 +1702,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
                        if (pcpu_first_unit_cpu == NR_CPUS)
                                pcpu_first_unit_cpu = cpu;
-                       pcpu_last_unit_cpu = cpu;
                }
        }
+       pcpu_last_unit_cpu = cpu;
        pcpu_nr_units = unit;
 
        for_each_possible_cpu(cpu)
index fe1a069fb59571488af927b54abce0e9f56fd88e..aa1aa23452355067af9d62179cd41d62c64e68fc 100644 (file)
@@ -501,12 +501,6 @@ void page_cache_sync_readahead(struct address_space *mapping,
        if (!ra->ra_pages)
                return;
 
-       /* be dumb */
-       if (filp && (filp->f_mode & FMODE_RANDOM)) {
-               force_page_cache_readahead(mapping, filp, offset, req_size);
-               return;
-       }
-
        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, false, offset, req_size);
 }
@@ -553,17 +547,5 @@ page_cache_async_readahead(struct address_space *mapping,
 
        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
-
-#ifdef CONFIG_BLOCK
-       /*
-        * Normally the current page is !uptodate and lock_page() will be
-        * immediately called to implicitly unplug the device. However this
-        * is not always true for RAID conifgurations, where data arrives
-        * not strictly in their submission order. In this case we need to
-        * explicitly kick off the IO.
-        */
-       if (PageUptodate(page))
-               blk_run_backing_dev(mapping->backing_dev_info, NULL);
-#endif
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
index c8d466a4e9b7119fab913fecb3819e94d2784b00..7dfa481c96bade62ae4ba34299dcd4fb8d79cdb3 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -971,11 +971,13 @@ static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 
        if (limit > 1)
                limit = 12;
-       ac_ptr = kzalloc_node(memsize, gfp, node);
+       ac_ptr = kmalloc_node(memsize, gfp, node);
        if (ac_ptr) {
                for_each_node(i) {
-                       if (i == node || !node_online(i))
+                       if (i == node || !node_online(i)) {
+                               ac_ptr[i] = NULL;
                                continue;
+                       }
                        ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
                        if (!ac_ptr[i]) {
                                for (i--; i >= 0; i--)
@@ -2249,8 +2251,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
        if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-           && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
-               cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+           && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+               cachep->obj_offset += PAGE_SIZE - size;
                size = PAGE_SIZE;
        }
 #endif
index 270e136349a0efc72359ad37e9956e65ba4a4eef..9c590eef79122dc0597dac27912f60e6b7dc6a72 100644 (file)
@@ -330,10 +330,8 @@ checks:
        if (offset > si->highest_bit)
                scan_base = offset = si->lowest_bit;
 
-       /* reuse swap entry of cache-only swap if not hibernation. */
-       if (vm_swap_full()
-               && cache == SWAP_CACHE
-               && si->swap_map[offset] == SWAP_HAS_CACHE) {
+       /* reuse swap entry of cache-only swap if not busy. */
+       if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
                int swap_was_freed;
                spin_unlock(&swap_lock);
                swap_was_freed = __try_to_reclaim_swap(si, offset);
index 680dcbb2d91ee75ddaaa1d0236962974a96d4696..c2287313b98598917bc6f7e194c25a2a6b0b6cb3 100644 (file)
@@ -512,15 +512,6 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
-/*
- * called before a call to iounmap() if the caller wants vm_area_struct's
- * immediately freed.
- */
-void set_iounmap_nonlazy(void)
-{
-       atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
-}
-
 /*
  * Purges all lazily-freed vmap areas.
  *
index 4649929401f888e87814ae35d87a106a9db410ae..692807f2228b25125e25963ab35e99777facdf8e 100644 (file)
@@ -1082,48 +1082,6 @@ static int too_many_isolated(struct zone *zone, int file,
        return isolated > inactive;
 }
 
-/*
- * Returns true if the caller should wait to clean dirty/writeback pages.
- *
- * If we are direct reclaiming for contiguous pages and we do not reclaim
- * everything in the list, try again and wait for writeback IO to complete.
- * This will stall high-order allocations noticeably. Only do that when really
- * need to free the pages under high memory pressure.
- */
-static inline bool should_reclaim_stall(unsigned long nr_taken,
-                                       unsigned long nr_freed,
-                                       int priority,
-                                       int lumpy_reclaim,
-                                       struct scan_control *sc)
-{
-       int lumpy_stall_priority;
-
-       /* kswapd should not stall on sync IO */
-       if (current_is_kswapd())
-               return false;
-
-       /* Only stall on lumpy reclaim */
-       if (!lumpy_reclaim)
-               return false;
-
-       /* If we have relaimed everything on the isolated list, no stall */
-       if (nr_freed == nr_taken)
-               return false;
-
-       /*
-        * For high-order allocations, there are two stall thresholds.
-        * High-cost allocations stall immediately where as lower
-        * order allocations such as stacks require the scanning
-        * priority to be much higher before stalling.
-        */
-       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-               lumpy_stall_priority = DEF_PRIORITY;
-       else
-               lumpy_stall_priority = DEF_PRIORITY / 3;
-
-       return priority <= lumpy_stall_priority;
-}
-
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
@@ -1218,9 +1176,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                nr_scanned += nr_scan;
                nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
 
-               /* Check if we should syncronously wait for writeback */
-               if (should_reclaim_stall(nr_taken, nr_freed, priority,
-                                       lumpy_reclaim, sc)) {
+               /*
+                * If we are direct reclaiming for contiguous pages and we do
+                * not reclaim everything in the list, try again and wait
+                * for IO to complete. This will stall high-order allocations
+                * but that should be acceptable to the caller
+                */
+               if (nr_freed < nr_taken && !current_is_kswapd() &&
+                   lumpy_reclaim) {
                        congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                        /*
index 42d76c65e9f29719ea206887cf1c597df99ae17d..c81321f9feec1cb28eee1b7e07237017336b89d0 100644 (file)
@@ -136,23 +136,10 @@ static void refresh_zone_stat_thresholds(void)
        int threshold;
 
        for_each_populated_zone(zone) {
-               unsigned long max_drift, tolerate_drift;
-
                threshold = calculate_threshold(zone);
 
                for_each_online_cpu(cpu)
                        zone_pcp(zone, cpu)->stat_threshold = threshold;
-
-               /*
-                * Only set percpu_drift_mark if there is a danger that
-                * NR_FREE_PAGES reports the low watermark is ok when in fact
-                * the min watermark could be breached by an allocation
-                */
-               tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
-               max_drift = num_online_cpus() * threshold;
-               if (max_drift > tolerate_drift)
-                       zone->percpu_drift_mark = high_wmark_pages(zone) +
-                                       max_drift;
        }
 }
 
@@ -728,7 +715,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
-                  zone_nr_free_pages(zone),
+                  zone_page_state(zone, NR_FREE_PAGES),
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
index a2d298472254f41c3b14e493626b70ba8cc84d04..8d934dd7fd5424049a811906e0467da91da208c3 100644 (file)
@@ -948,7 +948,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 
        csocket = NULL;
 
-       if (strlen(addr) >= UNIX_PATH_MAX) {
+       if (strlen(addr) > UNIX_PATH_MAX) {
                P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
                        addr);
                err = -ENAMETOOLONG;
index f415497046f034804fd42d9046301a7bc5305dc8..424712ca28ba1623a79becab0904dc538df93b2a 100644 (file)
@@ -2833,11 +2833,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
                        int len = cmd->len - sizeof(*rsp);
                        char req[64];
 
-                       if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
-                               l2cap_send_disconn_req(conn, sk);
-                               goto done;
-                       }
-
                        /* throw out any old stored conf requests */
                        result = L2CAP_CONF_SUCCESS;
                        len = l2cap_parse_conf_rsp(sk, rsp->data,
@@ -3910,24 +3905,16 @@ static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
        struct sock *sk;
        struct hlist_node *node;
        char *str = buf;
-       int size = PAGE_SIZE;
 
        read_lock_bh(&l2cap_sk_list.lock);
 
        sk_for_each(sk, node, &l2cap_sk_list.head) {
                struct l2cap_pinfo *pi = l2cap_pi(sk);
-               int len;
 
-               len = snprintf(str, size, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
+               str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
                                batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
                                sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
                                pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
-
-               size -= len;
-               if (size <= 0)
-                       break;
-
-               str += len;
        }
 
        read_unlock_bh(&l2cap_sk_list.lock);
index b3dd3f09ff2648b827de6265ab04a56ae12566be..483b8ab62f062ea490fd0089023881623b72ae3b 100755 (executable)
@@ -243,33 +243,7 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d)
        return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level,
                                                                auth_type);
 }
-#if 0  //cz@rock-chips.com
-static void rfcomm_session_timeout(unsigned long arg)
-{
-       struct rfcomm_session *s = (void *) arg;
-
-       BT_DBG("session %p state %ld", s, s->state);
 
-       set_bit(RFCOMM_TIMED_OUT, &s->flags);
-       rfcomm_schedule(RFCOMM_SCHED_TIMEO);
-}
-
-static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout)
-{
-       BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout);
-
-       if (!mod_timer(&s->timer, jiffies + timeout))
-               rfcomm_session_hold(s);
-}
-
-static void rfcomm_session_clear_timer(struct rfcomm_session *s)
-{
-       BT_DBG("session %p state %ld", s, s->state);
-
-       if (timer_pending(&s->timer) && del_timer(&s->timer))
-               rfcomm_session_put(s);
-}
-#endif
 /* ---- RFCOMM DLCs ---- */
 static void rfcomm_dlc_timeout(unsigned long arg)
 {
@@ -1891,14 +1865,7 @@ static inline void rfcomm_process_sessions(void)
        list_for_each_safe(p, n, &session_list) {
                struct rfcomm_session *s;
                s = list_entry(p, struct rfcomm_session, list);
-#if 0   //cz@rock-chips.com
-               if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
-                       s->state = BT_DISCONN;
-                       rfcomm_send_disc(s, 0);
-                       rfcomm_session_put(s);
-                       continue;
-               }
-#endif
+
                if (s->state == BT_LISTEN) {
                        rfcomm_accept_connection(s);
                        continue;
@@ -2075,7 +2042,6 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
        struct rfcomm_session *s;
        struct list_head *pp, *p;
        char *str = buf;
-       int size = PAGE_SIZE;
 
        rfcomm_lock();
 
@@ -2084,21 +2050,11 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
                list_for_each(pp, &s->dlcs) {
                        struct sock *sk = s->sock->sk;
                        struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
-                       int len;
 
-                       len = snprintf(str, size, "%s %s %ld %d %d %d %d\n",
+                       str += sprintf(str, "%s %s %ld %d %d %d %d\n",
                                        batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
                                        d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits);
-
-                       size -= len;
-                       if (size <= 0)
-                               break;
-
-                       str += len;
                }
-
-               if (size <= 0)
-                       break;
        }
 
        rfcomm_unlock();
index 30a36499ee7741afc4f6116c633273ee87032fae..8a20aaf1f2316676564501c49bb59c3fe56c898b 100644 (file)
@@ -1065,22 +1065,13 @@ static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf)
        struct sock *sk;
        struct hlist_node *node;
        char *str = buf;
-       int size = PAGE_SIZE;
 
        read_lock_bh(&rfcomm_sk_list.lock);
 
        sk_for_each(sk, node, &rfcomm_sk_list.head) {
-               int len;
-
-               len = snprintf(str, size, "%s %s %d %d\n",
+               str += sprintf(str, "%s %s %d %d\n",
                                batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
                                sk->sk_state, rfcomm_pi(sk)->channel);
-
-               size -= len;
-               if (size <= 0)
-                       break;
-
-               str += len;
        }
 
        read_unlock_bh(&rfcomm_sk_list.lock);
index 9bc453f020206c864f2a5f86b62f83bbd1a17a4f..e4343cabedf31f32d389c6fdd656b34da08a54d7 100644 (file)
@@ -975,22 +975,13 @@ static ssize_t sco_sysfs_show(struct class *dev, char *buf)
        struct sock *sk;
        struct hlist_node *node;
        char *str = buf;
-       int size = PAGE_SIZE;
 
        read_lock_bh(&sco_sk_list.lock);
 
        sk_for_each(sk, node, &sco_sk_list.head) {
-               int len;
-
-               len = snprintf(str, size, "%s %s %d\n",
+               str += sprintf(str, "%s %s %d\n",
                                batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
                                sk->sk_state);
-
-               size -= len;
-               if (size <= 0)
-                       break;
-
-               str += len;
        }
 
        read_unlock_bh(&sco_sk_list.lock);
index 3072272eca0e922a99996d0250121e19e9798a96..a16a2342f6bf2ee0b6a30329736c91acaff19bb9 100644 (file)
@@ -600,9 +600,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
 
        pskb_trim_rcsum(skb, len);
 
-       /* BUG: Should really parse the IP options here. */
-       memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
-
        nf_bridge_put(skb->nf_bridge);
        if (!nf_bridge_alloc(skb))
                return NF_DROP;
@@ -800,11 +797,9 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
        if (skb->nfct != NULL &&
            (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) &&
            skb->len > skb->dev->mtu &&
-           !skb_is_gso(skb)) {
-               /* BUG: Should really parse the IP options here. */
-               memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+           !skb_is_gso(skb))
                return ip_fragment(skb, br_dev_queue_push_xmit);
-       else
+       else
                return br_dev_queue_push_xmit(skb);
 }
 #else
index 4a192f7d93f8b3b53cfc74b4757909631597799f..e8d58f33fe097186d596778c7756a867752b1eac 100644 (file)
 #include <net/sock.h>
 #include <net/net_namespace.h>
 
-/*
- * To send multiple CAN frame content within TX_SETUP or to filter
- * CAN messages with multiplex index within RX_SETUP, the number of
- * different filters is limited to 256 due to the one byte index value.
- */
-#define MAX_NFRAMES 256
-
 /* use of last_frames[index].can_dlc */
 #define RX_RECV    0x40 /* received data for this element */
 #define RX_THR     0x80 /* element not been sent due to throttle feature */
@@ -95,16 +88,16 @@ struct bcm_op {
        struct list_head list;
        int ifindex;
        canid_t can_id;
-       u32 flags;
+       int flags;
        unsigned long frames_abs, frames_filtered;
        struct timeval ival1, ival2;
        struct hrtimer timer, thrtimer;
        struct tasklet_struct tsklet, thrtsklet;
        ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
        int rx_ifindex;
-       u32 count;
-       u32 nframes;
-       u32 currframe;
+       int count;
+       int nframes;
+       int currframe;
        struct can_frame *frames;
        struct can_frame *last_frames;
        struct can_frame sframe;
@@ -124,7 +117,7 @@ struct bcm_sock {
        struct list_head tx_ops;
        unsigned long dropped_usr_msgs;
        struct proc_dir_entry *bcm_proc_read;
-       char procname [20]; /* pointer printed in ASCII with \0 */
+       char procname [9]; /* pointer printed in ASCII with \0 */
 };
 
 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
@@ -181,7 +174,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 
                seq_printf(m, "rx_op: %03X %-5s ",
                                op->can_id, bcm_proc_getifname(ifname, op->ifindex));
-               seq_printf(m, "[%u]%c ", op->nframes,
+               seq_printf(m, "[%d]%c ", op->nframes,
                                (op->flags & RX_CHECK_DLC)?'d':' ');
                if (op->kt_ival1.tv64)
                        seq_printf(m, "timeo=%lld ",
@@ -204,7 +197,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 
        list_for_each_entry(op, &bo->tx_ops, list) {
 
-               seq_printf(m, "tx_op: %03X %s [%u] ",
+               seq_printf(m, "tx_op: %03X %s [%d] ",
                                op->can_id,
                                bcm_proc_getifname(ifname, op->ifindex),
                                op->nframes);
@@ -289,7 +282,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
        struct can_frame *firstframe;
        struct sockaddr_can *addr;
        struct sock *sk = op->sk;
-       unsigned int datalen = head->nframes * CFSIZ;
+       int datalen = head->nframes * CFSIZ;
        int err;
 
        skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -474,7 +467,7 @@ rx_changed_settime:
  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
  *                       received data stored in op->last_frames[]
  */
-static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
+static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
                                const struct can_frame *rxdata)
 {
        /*
@@ -560,8 +553,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
 /*
  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
  */
-static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
-                                 unsigned int index)
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
 {
        if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
                if (update)
@@ -582,7 +574,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
        int updated = 0;
 
        if (op->nframes > 1) {
-               unsigned int i;
+               int i;
 
                /* for MUX filter we start at index 1 */
                for (i = 1; i < op->nframes; i++)
@@ -631,7 +623,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 {
        struct bcm_op *op = (struct bcm_op *)data;
        const struct can_frame *rxframe = (struct can_frame *)skb->data;
-       unsigned int i;
+       int i;
 
        /* disable timeout */
        hrtimer_cancel(&op->timer);
@@ -831,15 +823,14 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 {
        struct bcm_sock *bo = bcm_sk(sk);
        struct bcm_op *op;
-       unsigned int i;
-       int err;
+       int i, err;
 
        /* we need a real device to send frames */
        if (!ifindex)
                return -ENODEV;
 
-       /* check nframes boundaries - we need at least one can_frame */
-       if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
+       /* we need at least one can_frame */
+       if (msg_head->nframes < 1)
                return -EINVAL;
 
        /* check the given can_id */
@@ -1003,10 +994,6 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                msg_head->nframes = 0;
        }
 
-       /* the first element contains the mux-mask => MAX_NFRAMES + 1  */
-       if (msg_head->nframes > MAX_NFRAMES + 1)
-               return -EINVAL;
-
        if ((msg_head->flags & RX_RTR_FRAME) &&
            ((msg_head->nframes != 1) ||
             (!(msg_head->can_id & CAN_RTR_FLAG))))
index 9559afc639c138a52d0d9ce8d49b83c7ce8abfaa..a407c3addbae432913a20dce2560d2bab1d248f2 100644 (file)
@@ -40,12 +40,10 @@ static inline int iov_from_user_compat_to_kern(struct iovec *kiov,
                compat_size_t len;
 
                if (get_user(len, &uiov32->iov_len) ||
-                   get_user(buf, &uiov32->iov_base))
-                       return -EFAULT;
-
-               if (len > INT_MAX - tot_len)
-                       len = INT_MAX - tot_len;
-
+                  get_user(buf, &uiov32->iov_base)) {
+                       tot_len = -EFAULT;
+                       break;
+               }
                tot_len += len;
                kiov->iov_base = compat_ptr(buf);
                kiov->iov_len = (__kernel_size_t) len;
index d04cd93f22b5cf262f60fc23ec6400c2f58282b9..584046eef9cb61920a39d943cb24a9fe71719669 100644 (file)
@@ -1484,10 +1484,10 @@ EXPORT_SYMBOL(netif_device_attach);
 
 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 {
-       return ((features & NETIF_F_NO_CSUM) ||
-               ((features & NETIF_F_V4_CSUM) &&
+       return ((features & NETIF_F_GEN_CSUM) ||
+               ((features & NETIF_F_IP_CSUM) &&
                 protocol == htons(ETH_P_IP)) ||
-               ((features & NETIF_F_V6_CSUM) &&
+               ((features & NETIF_F_IPV6_CSUM) &&
                 protocol == htons(ETH_P_IPV6)) ||
                ((features & NETIF_F_FCOE_CRC) &&
                 protocol == htons(ETH_P_FCOE)));
@@ -2519,7 +2519,7 @@ pull:
                        put_page(skb_shinfo(skb)->frags[0].page);
                        memmove(skb_shinfo(skb)->frags,
                                skb_shinfo(skb)->frags + 1,
-                               --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+                               --skb_shinfo(skb)->nr_frags);
                }
        }
 
@@ -2630,7 +2630,7 @@ int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
        switch (ret) {
        case GRO_NORMAL:
        case GRO_HELD:
-               skb->protocol = eth_type_trans(skb, skb->dev);
+               skb->protocol = eth_type_trans(skb, napi->dev);
 
                if (ret == GRO_NORMAL)
                        return netif_receive_skb(skb);
index abbe8fa66a51baff4bb51c35966491351837639f..4c12ddb5f5ee43ab546278ba5527c2e10939824b 100644 (file)
@@ -216,34 +216,22 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
        return 0;
 }
 
-static int ethtool_set_rxnfc(struct net_device *dev,
-                            u32 cmd, void __user *useraddr)
+static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
 {
-       struct ethtool_rxnfc info;
-       size_t info_size = sizeof(info);
+       struct ethtool_rxnfc cmd;
 
        if (!dev->ethtool_ops->set_rxnfc)
                return -EOPNOTSUPP;
 
-       /* struct ethtool_rxnfc was originally defined for
-        * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
-        * members.  User-space might still be using that
-        * definition. */
-       if (cmd == ETHTOOL_SRXFH)
-               info_size = (offsetof(struct ethtool_rxnfc, data) +
-                            sizeof(info.data));
-
-       if (copy_from_user(&info, useraddr, info_size))
+       if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
                return -EFAULT;
 
-       return dev->ethtool_ops->set_rxnfc(dev, &info);
+       return dev->ethtool_ops->set_rxnfc(dev, &cmd);
 }
 
-static int ethtool_get_rxnfc(struct net_device *dev,
-                            u32 cmd, void __user *useraddr)
+static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
 {
        struct ethtool_rxnfc info;
-       size_t info_size = sizeof(info);
        const struct ethtool_ops *ops = dev->ethtool_ops;
        int ret;
        void *rule_buf = NULL;
@@ -251,22 +239,13 @@ static int ethtool_get_rxnfc(struct net_device *dev,
        if (!ops->get_rxnfc)
                return -EOPNOTSUPP;
 
-       /* struct ethtool_rxnfc was originally defined for
-        * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
-        * members.  User-space might still be using that
-        * definition. */
-       if (cmd == ETHTOOL_GRXFH)
-               info_size = (offsetof(struct ethtool_rxnfc, data) +
-                            sizeof(info.data));
-
-       if (copy_from_user(&info, useraddr, info_size))
+       if (copy_from_user(&info, useraddr, sizeof(info)))
                return -EFAULT;
 
        if (info.cmd == ETHTOOL_GRXCLSRLALL) {
                if (info.rule_cnt > 0) {
-                       if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
-                               rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
-                                                  GFP_USER);
+                       rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+                                          GFP_USER);
                        if (!rule_buf)
                                return -ENOMEM;
                }
@@ -277,7 +256,7 @@ static int ethtool_get_rxnfc(struct net_device *dev,
                goto err_out;
 
        ret = -EFAULT;
-       if (copy_to_user(useraddr, &info, info_size))
+       if (copy_to_user(useraddr, &info, sizeof(info)))
                goto err_out;
 
        if (rule_buf) {
@@ -311,7 +290,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
        if (regs.len > reglen)
                regs.len = reglen;
 
-       regbuf = kzalloc(reglen, GFP_USER);
+       regbuf = kmalloc(reglen, GFP_USER);
        if (!regbuf)
                return -ENOMEM;
 
@@ -1132,12 +1111,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GRXCLSRLCNT:
        case ETHTOOL_GRXCLSRULE:
        case ETHTOOL_GRXCLSRLALL:
-               rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
+               rc = ethtool_get_rxnfc(dev, useraddr);
                break;
        case ETHTOOL_SRXFH:
        case ETHTOOL_SRXCLSRLDEL:
        case ETHTOOL_SRXCLSRLINS:
-               rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
+               rc = ethtool_set_rxnfc(dev, useraddr);
                break;
        case ETHTOOL_GGRO:
                rc = ethtool_get_gro(dev, useraddr);
index f911e665a7dbb2eb43981603439381782d98df8c..16ad45d4882b56a2c531a0a944bc31c690ff5a4b 100644 (file)
@@ -38,7 +38,7 @@
 
 int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
 {
-       int size, ct, err;
+       int size, err, ct;
 
        if (m->msg_namelen) {
                if (mode == VERIFY_READ) {
@@ -60,13 +60,14 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
        err = 0;
 
        for (ct = 0; ct < m->msg_iovlen; ct++) {
-               size_t len = iov[ct].iov_len;
-
-               if (len > INT_MAX - err) {
-                       len = INT_MAX - err;
-                       iov[ct].iov_len = len;
-               }
-               err += len;
+               err += iov[ct].iov_len;
+               /*
+                * Goal is not to verify user data, but to prevent returning
+                * negative value, which is interpreted as errno.
+                * Overflow is still possible, but it is harmless.
+                */
+               if (err < 0)
+                       return -EMSGSIZE;
        }
 
        return err;
index e69625084481254a792512cb50ddf05fe332057a..e587e6819698cbe7485fb336cb70177e3bf98ca4 100644 (file)
@@ -945,10 +945,7 @@ static void neigh_update_hhs(struct neighbour *neigh)
 {
        struct hh_cache *hh;
        void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
-               = NULL;
-
-       if (neigh->dev->header_ops)
-               update = neigh->dev->header_ops->cache_update;
+               = neigh->dev->header_ops->cache_update;
 
        if (update) {
                for (hh = neigh->hh; hh; hh = hh->hh_next) {
index d5617d45deefe819d5509904d1d8c5a957c12972..427ded841224e6b7ce79cc91158efec24ab54b4a 100644 (file)
@@ -366,8 +366,7 @@ static ssize_t wireless_show(struct device *d, char *buf,
        const struct iw_statistics *iw;
        ssize_t ret = -EINVAL;
 
-       if (!rtnl_trylock())
-               return restart_syscall();
+       rtnl_lock();
        if (dev_isalive(dev)) {
                iw = get_wireless_stats(dev);
                if (iw)
index 9b264634acfd6233ebfa7369cc4537638377d06d..b7ba91b074b33995deee5278fb54c30d100d041b 100644 (file)
@@ -156,8 +156,6 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
                switch (cmsg->cmsg_type)
                {
                case SCM_RIGHTS:
-                       if (!sock->ops || sock->ops->family != PF_UNIX)
-                               goto error;
                        err=scm_fp_copy(cmsg, &p->fp);
                        if (err<0)
                                goto error;
index 283f4412847996317dc24f0c1f0e7aa7b617bcd4..ec85681a7dd83765878cd6c791538a7941732178 100644 (file)
@@ -2575,10 +2575,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                __copy_skb_header(nskb, skb);
                nskb->mac_len = skb->mac_len;
 
-               /* nskb and skb might have different headroom */
-               if (nskb->ip_summed == CHECKSUM_PARTIAL)
-                       nskb->csum_start += skb_headroom(nskb) - headroom;
-
                skb_reset_mac_header(nskb);
                skb_set_network_header(nskb, skb->mac_len);
                nskb->transport_header = (nskb->network_header +
@@ -2709,7 +2705,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                return -E2BIG;
 
        headroom = skb_headroom(p);
-       nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
+       nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
        if (unlikely(!nskb))
                return -ENOMEM;
 
@@ -2730,7 +2726,6 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
        skb_shinfo(nskb)->frag_list = p;
        skb_shinfo(nskb)->gso_size = pinfo->gso_size;
-       pinfo->gso_size = 0;
        skb_header_release(p);
        nskb->prev = p;
 
index e48c85f3b95d2335b60a9320cdabc948680de175..a37debfeb1b2bcb3309d93fc5d45b5f82e202d6e 100644 (file)
@@ -140,10 +140,10 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk->sk_write_pending++;
-               sk_wait_event(sk, &current_timeo, sk->sk_err ||
-                                                 (sk->sk_shutdown & SEND_SHUTDOWN) ||
-                                                 (sk_stream_memory_free(sk) &&
-                                                 !vm_wait));
+               sk_wait_event(sk, &current_timeo, !sk->sk_err &&
+                                                 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
+                                                 sk_stream_memory_free(sk) &&
+                                                 vm_wait);
                sk->sk_write_pending--;
 
                if (vm_wait) {
index 48759983b2db2d16f7495fabf4dbdef68872a102..37731da4148164adc741da891a86b27e2484187d 100644 (file)
@@ -164,8 +164,7 @@ static __init int dccpprobe_init(void)
        if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
                goto err0;
 
-       ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
-                                       "dccp");
+       ret = register_jprobe(&dccp_send_probe);
        if (ret)
                goto err1;
 
index 5df7b5453556bb489bb7ee8fb17ca7e4a3e03cc7..7a58c87baf172f9384fab475f8243eab58716e58 100644 (file)
@@ -1555,8 +1555,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
                        if (r_len > sizeof(struct linkinfo_dn))
                                r_len = sizeof(struct linkinfo_dn);
 
-                       memset(&link, 0, sizeof(link));
-
                        switch(sock->state) {
                                case SS_CONNECTING:
                                        link.idn_linkstate = LL_CONNECTING;
index 85672e7cb69cbbc9a9b071d02501ca5eda27d261..0e0254fd767de359769b080ff4bacdcb12c52eb4 100644 (file)
@@ -296,14 +296,23 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        mutex_lock(&econet_mutex);
 
-        if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
-                mutex_unlock(&econet_mutex);
-                return -EINVAL;
-        }
-        addr.station = saddr->addr.station;
-        addr.net = saddr->addr.net;
-        port = saddr->port;
-        cb = saddr->cb;
+       if (saddr == NULL) {
+               struct econet_sock *eo = ec_sk(sk);
+
+               addr.station = eo->station;
+               addr.net     = eo->net;
+               port         = eo->port;
+               cb           = eo->cb;
+       } else {
+               if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
+                       mutex_unlock(&econet_mutex);
+                       return -EINVAL;
+               }
+               addr.station = saddr->addr.station;
+               addr.net = saddr->addr.net;
+               port = saddr->port;
+               cb = saddr->cb;
+       }
 
        /* Look for a device with the right network number. */
        dev = net2dev_map[addr.net];
@@ -341,6 +350,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
                eb = (struct ec_cb *)&skb->cb;
 
+               /* BUG: saddr may be NULL */
                eb->cookie = saddr->cookie;
                eb->sec = *saddr;
                eb->sent = ec_tx_done;
@@ -659,9 +669,6 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
        err = 0;
        switch (cmd) {
        case SIOCSIFADDR:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-
                edev = dev->ec_ptr;
                if (edev == NULL) {
                        /* Magic up a new one. */
index fc28ac2463cf7027cb73fbe8ae3508ba3308f89b..19a5f3ce53a2e268b929fa2e3d7ee099fee091dd 100644 (file)
@@ -1083,7 +1083,6 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
                }
                ip_mc_up(in_dev);
                /* fall through */
-       case NETDEV_NOTIFY_PEERS:
        case NETDEV_CHANGEADDR:
                /* Send gratuitous ARP to notify of link change */
                if (IN_DEV_ARP_NOTIFY(in_dev)) {
@@ -1359,19 +1358,14 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
 {
        int *valp = ctl->data;
        int val = *valp;
-       loff_t pos = *ppos;
        int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
        if (write && *valp != val) {
                struct net *net = ctl->extra2;
 
                if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
-                       if (!rtnl_trylock()) {
-                               /* Restore the original values before restarting */
-                               *valp = val;
-                               *ppos = pos;
+                       if (!rtnl_trylock())
                                return restart_syscall();
-                       }
                        if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
                                inet_forward_change(net);
                        } else if (*valp) {
index 169da9336512753bb74f2a31be07c09707a3d5fc..d41e5de79a822b658b8bde74c3d88f96524f2166 100644 (file)
@@ -946,6 +946,7 @@ int igmp_rcv(struct sk_buff *skb)
                break;
        case IGMP_HOST_MEMBERSHIP_REPORT:
        case IGMPV2_HOST_MEMBERSHIP_REPORT:
+       case IGMPV3_HOST_MEMBERSHIP_REPORT:
                /* Is it our report looped back? */
                if (skb_rtable(skb)->fl.iif == 0)
                        break;
@@ -959,7 +960,6 @@ int igmp_rcv(struct sk_buff *skb)
                in_dev_put(in_dev);
                return pim_rcv_v1(skb);
 #endif
-       case IGMPV3_HOST_MEMBERSHIP_REPORT:
        case IGMP_DVMRP:
        case IGMP_TRACE:
        case IGMP_HOST_LEAVE_MESSAGE:
index 2ef9026526b84d3ea5a7081d734462bb78efaf0e..4d50daab0c3e7824dcf2fc35637ee36d1c031c7c 100644 (file)
@@ -476,8 +476,9 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
         * we can switch to copy when see the first bad fragment.
         */
        if (skb_has_frags(skb)) {
-               struct sk_buff *frag, *frag2;
+               struct sk_buff *frag;
                int first_len = skb_pagelen(skb);
+               int truesizes = 0;
 
                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
@@ -490,18 +491,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
-                               goto slow_path_clean;
+                           goto slow_path;
 
                        /* Partially cloned skb? */
                        if (skb_shared(frag))
-                               goto slow_path_clean;
+                               goto slow_path;
 
                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
-                       skb->truesize -= frag->truesize;
+                       truesizes += frag->truesize;
                }
 
                /* Everything is OK. Generate! */
@@ -511,6 +512,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
+               skb->truesize -= truesizes;
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
@@ -562,15 +564,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;
-
-slow_path_clean:
-               skb_walk_frags(skb, frag2) {
-                       if (frag2 == frag)
-                               break;
-                       frag2->sk = NULL;
-                       frag2->destructor = NULL;
-                       skb->truesize += frag2->truesize;
-               }
        }
 
 slow_path:
index 6c8f6c9e5cdb7de6eb0cf82d1f5588d5ef4bd5c7..5b1050a5d874e35ffd608f8e6d0bbb2f1a5204f4 100644 (file)
@@ -2712,11 +2712,6 @@ slow_output:
 
 EXPORT_SYMBOL_GPL(__ip_route_output_key);
 
-static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
-{
-       return NULL;
-}
-
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -2725,7 +2720,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .family                 =       AF_INET,
        .protocol               =       cpu_to_be16(ETH_P_IP),
        .destroy                =       ipv4_dst_destroy,
-       .check                  =       ipv4_blackhole_dst_check,
+       .check                  =       ipv4_dst_check,
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
        .entries                =       ATOMIC_INIT(0),
 };
index 734fe94827dc7716e05581ff3013a2c41644fcda..f1813bc7108811a50ff8284775ad053623903a2e 100644 (file)
@@ -386,6 +386,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
         */
 
        mask = 0;
+       if (sk->sk_err)
+               mask = POLLERR;
 
        /*
         * POLLHUP is certainly not done right. But poll() doesn't
@@ -449,17 +451,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
-               } else
-                       mask |= POLLOUT | POLLWRNORM;
+               }
 
                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        }
-       /* This barrier is coupled with smp_wmb() in tcp_reset() */
-       smp_rmb();
-       if (sk->sk_err)
-               mask |= POLLERR;
-
        return mask;
 }
 
@@ -938,7 +934,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                goto out_err;
 
        while (--iovlen >= 0) {
-               size_t seglen = iov->iov_len;
+               int seglen = iov->iov_len;
                unsigned char __user *from = iov->iov_base;
 
                iov++;
@@ -1338,7 +1334,6 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                sk_eat_skb(sk, skb, 0);
                if (!desc->count)
                        break;
-               tp->copied_seq = seq;
        }
        tp->copied_seq = seq;
 
@@ -1980,8 +1975,11 @@ adjudge_to_death:
                }
        }
        if (sk->sk_state != TCP_CLOSE) {
+               int orphan_count = percpu_counter_read_positive(
+                                               sk->sk_prot->orphan_count);
+
                sk_mem_reclaim(sk);
-               if (tcp_too_many_orphans(sk, 0)) {
+               if (tcp_too_many_orphans(sk, orphan_count)) {
                        if (net_ratelimit())
                                printk(KERN_INFO "TCP: too many of orphaned "
                                       "sockets\n");
@@ -2882,7 +2880,7 @@ void __init tcp_init(void)
 {
        struct sk_buff *skb = NULL;
        unsigned long nr_pages, limit;
-       int i, max_share, cnt;
+       int order, i, max_share;
 
        BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
 
@@ -2931,23 +2929,31 @@ void __init tcp_init(void)
                INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
        }
 
-
-       cnt = tcp_hashinfo.ehash_size;
-
-       tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
-       sysctl_tcp_max_orphans = cnt / 2;
-       sysctl_max_syn_backlog = max(128, cnt / 256);
+       /* Try to be a bit smarter and adjust defaults depending
+        * on available memory.
+        */
+       for (order = 0; ((1 << order) << PAGE_SHIFT) <
+                       (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
+                       order++)
+               ;
+       if (order >= 4) {
+               tcp_death_row.sysctl_max_tw_buckets = 180000;
+               sysctl_tcp_max_orphans = 4096 << (order - 4);
+               sysctl_max_syn_backlog = 1024;
+       } else if (order < 3) {
+               tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
+               sysctl_tcp_max_orphans >>= (3 - order);
+               sysctl_max_syn_backlog = 128;
+       }
 
        /* Set the pressure threshold to be a fraction of global memory that
         * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-        * memory, with a floor of 128 pages, and a ceiling that prevents an
-        * integer overflow.
+        * memory, with a floor of 128 pages.
         */
        nr_pages = totalram_pages - totalhigh_pages;
        limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
        limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
        limit = max(limit, 128UL);
-       limit = min(limit, INT_MAX * 4UL / 3 / 2);
        sysctl_tcp_mem[0] = limit / 4 * 3;
        sysctl_tcp_mem[1] = limit;
        sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
index ce1ce82e46b1462299c40879a40c3320d6f545fb..d86784be7ab3d0160122cdd42f071cffaae82351 100644 (file)
@@ -3969,8 +3969,6 @@ static void tcp_reset(struct sock *sk)
        default:
                sk->sk_err = ECONNRESET;
        }
-       /* This barrier is coupled with smp_rmb() in tcp_poll() */
-       smp_wmb();
 
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_error_report(sk);
@@ -5701,9 +5699,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
                                /* tcp_ack considers this ACK as duplicate
                                 * and does not calculate rtt.
-                                * Force it here.
+                                * Fix it at least with timestamps.
                                 */
-                               tcp_ack_update_rtt(sk, 0, 0);
+                               if (tp->rx_opt.saw_tstamp &&
+                                   tp->rx_opt.rcv_tsecr && !tp->srtt)
+                                       tcp_ack_saw_tstamp(sk, 0);
 
                                if (tp->rx_opt.tstamp_ok)
                                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
index af83bdf559da50314c15c079147a4fab20257024..fcd278a7080e1656060cd5f57e38764bec3bdb74 100644 (file)
@@ -2037,9 +2037,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
        int mib_idx;
        int fwd_rexmitting = 0;
 
-       if (!tp->packets_out)
-               return;
-
        if (!tp->lost_out)
                tp->retransmit_high = tp->snd_una;
 
index 57d550193df3e9d0433b1ca1e37a6f8716790a0b..cdb2ca7684d4ab0821ca5c1bcc2ad72fc3aa9651 100644 (file)
@@ -65,18 +65,18 @@ static void tcp_write_err(struct sock *sk)
 static int tcp_out_of_resources(struct sock *sk, int do_reset)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       int shift = 0;
+       int orphans = percpu_counter_read_positive(&tcp_orphan_count);
 
        /* If peer does not open window for long time, or did not transmit
         * anything for long time, penalize it. */
        if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
-               shift++;
+               orphans <<= 1;
 
        /* If some dubious ICMP arrived, penalize even more. */
        if (sk->sk_err_soft)
-               shift++;
+               orphans <<= 1;
 
-       if (tcp_too_many_orphans(sk, shift)) {
+       if (tcp_too_many_orphans(sk, orphans)) {
                if (net_ratelimit())
                        printk(KERN_INFO "Out of socket memory\n");
 
index 31db78ca746985165ac38118aa9ebc7e80850526..0fa9f70e4b1996f6af891f2984c1344675d62e2d 100644 (file)
@@ -1292,9 +1292,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
        uh   = udp_hdr(skb);
        ulen = ntohs(uh->len);
-       saddr = ip_hdr(skb)->saddr;
-       daddr = ip_hdr(skb)->daddr;
-
        if (ulen > skb->len)
                goto short_packet;
 
@@ -1308,6 +1305,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (udp4_csum_init(skb, uh, proto))
                goto csum_error;
 
+       saddr = ip_hdr(skb)->saddr;
+       daddr = ip_hdr(skb)->daddr;
+
        if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
                return __udp4_lib_mcast_deliver(net, skb, uh,
                                saddr, daddr, udptable);
@@ -1832,14 +1832,12 @@ void __init udp_init(void)
        udp_table_init(&udp_table);
        /* Set the pressure threshold up by the same strategy of TCP. It is a
         * fraction of global memory that is up to 1/2 at 256 MB, decreasing
-        * toward zero with the amount of memory, with a floor of 128 pages,
-        * and a ceiling that prevents an integer overflow.
+        * toward zero with the amount of memory, with a floor of 128 pages.
         */
        nr_pages = totalram_pages - totalhigh_pages;
        limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
        limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
        limit = max(limit, 128UL);
-       limit = min(limit, INT_MAX * 4UL / 3 / 2);
        sysctl_udp_mem[0] = limit / 4 * 3;
        sysctl_udp_mem[1] = limit;
        sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
index e3a9a655e6adc4202c870a9bf7a89e38be31cc6d..74fb2eb833ec16719f5bbdf8ae379007e653abba 100644 (file)
@@ -71,7 +71,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
                if (xdst->u.rt.fl.oif == fl->oif &&     /*XXX*/
                    xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
                    xdst->u.rt.fl.fl4_src == fl->fl4_src &&
-                    !((xdst->u.rt.fl.fl4_tos ^ fl->fl4_tos) & IPTOS_RT_MASK) &&
+                   xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
                    xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
                        dst_clone(dst);
                        break;
@@ -83,7 +83,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
 
 static int xfrm4_get_tos(struct flowi *fl)
 {
-       return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
+       return fl->fl4_tos;
 }
 
 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
index d1f77ccea28cafd75592b190bfc7afd1940b8840..1fd0a3d775d26767dec15c78f599dc96b1c3c08d 100644 (file)
@@ -504,11 +504,8 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
        if (p == &net->ipv6.devconf_dflt->forwarding)
                return 0;
 
-       if (!rtnl_trylock()) {
-               /* Restore the original values before restarting */
-               *p = old;
+       if (!rtnl_trylock())
                return restart_syscall();
-       }
 
        if (p == &net->ipv6.devconf_all->forwarding) {
                __s32 newf = net->ipv6.devconf_all->forwarding;
@@ -3994,15 +3991,12 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
 {
        int *valp = ctl->data;
        int val = *valp;
-       loff_t pos = *ppos;
        int ret;
 
        ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
        if (write)
                ret = addrconf_fixup_forwarding(ctl, valp, val);
-       if (ret)
-               *ppos = pos;
        return ret;
 }
 
@@ -4081,11 +4075,8 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
        if (p == &net->ipv6.devconf_dflt->disable_ipv6)
                return 0;
 
-       if (!rtnl_trylock()) {
-               /* Restore the original values before restarting */
-               *p = old;
+       if (!rtnl_trylock())
                return restart_syscall();
-       }
 
        if (p == &net->ipv6.devconf_all->disable_ipv6) {
                __s32 newf = net->ipv6.devconf_all->disable_ipv6;
@@ -4104,15 +4095,12 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
 {
        int *valp = ctl->data;
        int val = *valp;
-       loff_t pos = *ppos;
        int ret;
 
        ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
        if (write)
                ret = addrconf_disable_ipv6(ctl, valp, val);
-       if (ret)
-               *ppos = pos;
        return ret;
 }
 
index eca3ef77f22760fa4bb0a0fbb05ec293b0409005..cd48801a8d6f465d546b61a2e9629bf6e2091714 100644 (file)
@@ -643,7 +643,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
        if (skb_has_frags(skb)) {
                int first_len = skb_pagelen(skb);
-               struct sk_buff *frag2;
+               int truesizes = 0;
 
                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
@@ -655,18 +655,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
-                               goto slow_path_clean;
+                           goto slow_path;
 
                        /* Partially cloned skb? */
                        if (skb_shared(frag))
-                               goto slow_path_clean;
+                               goto slow_path;
 
                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
+                               truesizes += frag->truesize;
                        }
-                       skb->truesize -= frag->truesize;
                }
 
                err = 0;
@@ -697,6 +697,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
+               skb->truesize -= truesizes;
                skb->len = first_len;
                ipv6_hdr(skb)->payload_len = htons(first_len -
                                                   sizeof(struct ipv6hdr));
@@ -759,15 +760,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                              IPSTATS_MIB_FRAGFAILS);
                dst_release(&rt->u.dst);
                return err;
-
-slow_path_clean:
-               skb_walk_frags(skb, frag2) {
-                       if (frag2 == frag)
-                               break;
-                       frag2->sk = NULL;
-                       frag2->destructor = NULL;
-                       skb->truesize += frag2->truesize;
-               }
        }
 
 slow_path:
index 1264ad0d9da1cdbb020fd2b7e0ffbb3af2fe469e..5a7f00cd15ce7e194a0c3f1c1cda956c0eae2843 100644 (file)
@@ -95,11 +95,9 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        fl.fl_ip_dport = otcph.source;
        security_skb_classify_flow(oldskb, &fl);
        dst = ip6_route_output(net, NULL, &fl);
-       if (dst == NULL || dst->error) {
-               dst_release(dst);
+       if (dst == NULL)
                return;
-       }
-       if (xfrm_lookup(net, &dst, &fl, NULL, 0))
+       if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0))
                return;
 
        hh_len = (dst->dev->hard_header_len + 15)&~15;
index bfc8737292bdb3438a38d030eb7c933ab8b941c9..4b6a539cb87ab7f1a2b82fe62a24b576f9062bf8 100644 (file)
@@ -63,7 +63,6 @@ struct nf_ct_frag6_queue
        struct inet_frag_queue  q;
 
        __be32                  id;             /* fragment id          */
-       u32                     user;
        struct in6_addr         saddr;
        struct in6_addr         daddr;
 
index e307517cbddcfa3767f7777b0e69aa55973119e8..d6fe7646a8ff7d8599c3565e6e6c9deb68732cef 100644 (file)
@@ -1561,13 +1561,14 @@ out:
  *     i.e. Path MTU discovery
  */
 
-static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
-                            struct net *net, u32 pmtu, int ifindex)
+void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
+                       struct net_device *dev, u32 pmtu)
 {
        struct rt6_info *rt, *nrt;
+       struct net *net = dev_net(dev);
        int allfrag = 0;
 
-       rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
+       rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
        if (rt == NULL)
                return;
 
@@ -1635,27 +1636,6 @@ out:
        dst_release(&rt->u.dst);
 }
 
-void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
-                       struct net_device *dev, u32 pmtu)
-{
-       struct net *net = dev_net(dev);
-
-       /*
-        * RFC 1981 states that a node "MUST reduce the size of the packets it
-        * is sending along the path" that caused the Packet Too Big message.
-        * Since it's not possible in the general case to determine which
-        * interface was used to send the original packet, we update the MTU
-        * on the interface that will be used to send future packets. We also
-        * update the MTU on the interface that received the Packet Too Big in
-        * case the original packet was forced out that interface with
-        * SO_BINDTODEVICE or similar. This is the next best thing to the
-        * correct behaviour, which would be to update the MTU on all
-        * interfaces.
-        */
-       rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
-       rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
-}
-
 /*
  *     Misc support functions
  */
index b6cef980095fe745340dc78ce550ed20985c91e9..dd35641835f408d161a7d1c70550a15f1a0309ac 100644 (file)
@@ -810,8 +810,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
        if (err < 0) {
-               irias_delete_object(self->ias_obj);
-               self->ias_obj = NULL;
+               kfree(self->ias_obj->name);
+               kfree(self->ias_obj);
                return err;
        }
 
index f7d6150e0903cb7ad0d873ed22958f0c5f97f9df..294e34d3517cb87cc191c1cfec9d7b39de30245d 100644 (file)
@@ -501,8 +501,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
                IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len);
 
                /* Make sure the string is null-terminated */
-               if (n + value_len < skb->len)
-                       fp[n + value_len] = 0x00;
+               fp[n+value_len] = 0x00;
                IRDA_DEBUG(4, "Got string %s\n", fp+n);
 
                /* Will truncate to IAS_MAX_STRING bytes */
index cfef331a41d89c2dc986fc8f627966e9cc61f41f..315ead3cb926beb93550b9a89f54b8e7497eb504 100644 (file)
@@ -1101,7 +1101,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
        memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
        le16_to_cpus(&val_len); n+=2;
 
-       if (val_len >= 1016) {
+       if (val_len > 1016) {
                IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ );
                return -RSP_INVALID_COMMAND_FORMAT;
        }
index 71cd38c1a67fcfe241215effff78efba06be966c..fc1a20565e2d7ab3cbef897ac4ed2d5793189a62 100644 (file)
@@ -298,8 +298,6 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
 
        p.pi = pi;     /* In case handler needs to know */
        p.pl = buf[1]; /* Extract length of value */
-       if (p.pl > 32)
-               p.pl = 32;
 
        IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__,
                   p.pi, p.pl);
@@ -320,7 +318,7 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
                   (__u8) str[0], (__u8) str[1]);
 
        /* Null terminate string */
-       str[p.pl] = '\0';
+       str[p.pl+1] = '\0';
 
        p.pv.c = str; /* Handler will need to take a copy */
 
index 2da8d14c28ce93eec8f3bbbe16e1f01c3571358e..7aa4fd170104452be2d9daa1194bc313dfd759eb 100644 (file)
@@ -977,8 +977,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
 {
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
-       unsigned int opt;
-       int rc = -EINVAL;
+       int rc = -EINVAL, opt;
 
        lock_sock(sk);
        if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
index ea367cf9a052263a8db0a46aad61a5f8dde92db8..4d5543af3123e02201f872f7ac4f3d1cc45ea88d 100644 (file)
@@ -15,12 +15,8 @@ comment "CFG80211 needs to be enabled for MAC80211"
 
 if MAC80211 != n
 
-config MAC80211_HAS_RC
-       def_bool n
-
 config MAC80211_RC_PID
        bool "PID controller based rate control algorithm" if EMBEDDED
-       select MAC80211_HAS_RC
        ---help---
          This option enables a TX rate control algorithm for
          mac80211 that uses a PID controller to select the TX
@@ -28,14 +24,12 @@ config MAC80211_RC_PID
 
 config MAC80211_RC_MINSTREL
        bool "Minstrel" if EMBEDDED
-       select MAC80211_HAS_RC
        default y
        ---help---
          This option enables the 'minstrel' TX rate control algorithm
 
 choice
        prompt "Default rate control algorithm"
-       depends on MAC80211_HAS_RC
        default MAC80211_RC_DEFAULT_MINSTREL
        ---help---
          This option selects the default rate control algorithm
@@ -68,9 +62,6 @@ config MAC80211_RC_DEFAULT
 
 endif
 
-comment "Some wireless drivers require a rate control algorithm"
-       depends on MAC80211_HAS_RC=n
-
 config MAC80211_MESH
        bool "Enable mac80211 mesh networking (pre-802.11s) support"
        depends on MAC80211 && EXPERIMENTAL
index 2e08921b7cf6ecf1994c3deeb192b89aa3fac936..89e238b001de936e4585f2eae73b3d09ae8cb3f7 100644 (file)
@@ -177,10 +177,10 @@ static void sta_addba_resp_timer_expired(unsigned long data)
 
        /* check if the TID waits for addBA response */
        spin_lock_bh(&sta->lock);
-       if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
-                      HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
+       if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
                                                HT_ADDBA_REQUESTED_MSK) {
                spin_unlock_bh(&sta->lock);
+               *state = HT_AGG_STATE_IDLE;
 #ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "timer expired on tid %d but we are not "
                                "(or no longer) expecting addBA response there",
index ca62bfe229dd501f8064698aa8605c552dc1601f..5a461641e94ee8cfbd6d9c7d82b2f7dcc1f235ea 100644 (file)
@@ -264,7 +264,6 @@ enum ieee80211_sta_flags {
        IEEE80211_STA_DISABLE_11N       = BIT(4),
        IEEE80211_STA_CSA_RECEIVED      = BIT(5),
        IEEE80211_STA_MFP_ENABLED       = BIT(6),
-       IEEE80211_STA_NULLFUNC_ACKED    = BIT(7),
 };
 
 /* flags for MLME request */
index 19fbd25a705b22d3d220bc8f7ea6266a20564c68..797f53942e5f870093ff771b3d3bb523c664044a 100644 (file)
@@ -441,7 +441,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        rcu_read_lock();
 
        sband = local->hw.wiphy->bands[info->band];
-       fc = hdr->frame_control;
 
        sta = sta_info_get(local, hdr->addr1);
 
@@ -523,20 +522,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                        local->dot11FailedCount++;
        }
 
-       if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
-           (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
-           !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
-           local->ps_sdata && !(local->scanning)) {
-               if (info->flags & IEEE80211_TX_STAT_ACK) {
-                       local->ps_sdata->u.mgd.flags |=
-                               IEEE80211_STA_NULLFUNC_ACKED;
-                       ieee80211_queue_work(&local->hw,
-                                            &local->dynamic_ps_enable_work);
-               } else
-                       mod_timer(&local->dynamic_ps_timer, jiffies +
-                                 msecs_to_jiffies(10));
-       }
-
        /* this was a transmitted frame, but now we want to reuse it */
        skb_orphan(skb);
 
index 5bea319e3e54b7d17cad18f4a71a353b3b5b6f68..6cae2954963d3bd8abb5599a20932e28a5bad4e7 100644 (file)
@@ -269,6 +269,12 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
        if (wk->bss->wmm_used)
                wmm = 1;
 
+       /* get all rates supported by the device and the AP as
+        * some APs don't like getting a superset of their rates
+        * in the association request (e.g. D-Link DAP 1353 in
+        * b-only mode) */
+       rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
+
        if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
            (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
                capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
@@ -303,17 +309,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
        *pos++ = wk->ssid_len;
        memcpy(pos, wk->ssid, wk->ssid_len);
 
-       if (wk->bss->supp_rates_len) {
-               /* get all rates supported by the device and the AP as
-                * some APs don't like getting a superset of their rates
-                * in the association request (e.g. D-Link DAP 1353 in
-                * b-only mode) */
-               rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
-       } else {
-               rates = ~0;
-               rates_len = sband->n_bitrates;
-       }
-
        /* add all rates which were marked to be used above */
        supp_rates_len = rates_len;
        if (supp_rates_len > 8)
@@ -655,11 +650,8 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
        } else {
                if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
                        ieee80211_send_nullfunc(local, sdata, 1);
-
-               if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
-                       conf->flags |= IEEE80211_CONF_PS;
-                       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
-               }
+               conf->flags |= IEEE80211_CONF_PS;
+               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        }
 }
 
@@ -750,7 +742,6 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
                container_of(work, struct ieee80211_local,
                             dynamic_ps_enable_work);
        struct ieee80211_sub_if_data *sdata = local->ps_sdata;
-       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
        /* can only happen when PS was just disabled anyway */
        if (!sdata)
@@ -759,16 +750,11 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
        if (local->hw.conf.flags & IEEE80211_CONF_PS)
                return;
 
-       if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
-           (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
+       if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
                ieee80211_send_nullfunc(local, sdata, 1);
 
-       if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
-           (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
-               ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
-               local->hw.conf.flags |= IEEE80211_CONF_PS;
-               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
-       }
+       local->hw.conf.flags |= IEEE80211_CONF_PS;
+       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 }
 
 void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -2472,7 +2458,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        list_add(&wk->list, &ifmgd->work_list);
 
        ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
-       ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
 
        for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
                if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
index 38499c4e7106c514b22efbf679c4bd957f6a61ef..16c6cdc6e8af363b8ffeda0e6f301dead6440c83 100644 (file)
@@ -1220,8 +1220,7 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
                     (rx->key || rx->sdata->drop_unencrypted)))
                return -EACCES;
        if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
-               if (unlikely(!ieee80211_has_protected(fc) &&
-                            ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
+               if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
                             rx->key))
                        return -EACCES;
                /* BIP does not use Protected field, so need to check MMIE */
@@ -1591,7 +1590,6 @@ static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 {
        struct net_device *dev = rx->dev;
-       struct ieee80211_local *local = rx->local;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        __le16 fc = hdr->frame_control;
        int err;
@@ -1614,13 +1612,6 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += rx->skb->len;
 
-       if (ieee80211_is_data(hdr->frame_control) &&
-           !is_multicast_ether_addr(hdr->addr1) &&
-           local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
-               mod_timer(&local->dynamic_ps_timer, jiffies +
-                         msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
-       }
-
        ieee80211_deliver_skb(rx);
 
        return RX_QUEUED;
@@ -1818,11 +1809,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                        return RX_CONTINUE;
                }
                break;
-       case WLAN_CATEGORY_MESH_PLINK:
-       case WLAN_CATEGORY_MESH_PATH_SEL:
-               if (ieee80211_vif_is_mesh(&sdata->vif))
-                       return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
-               break;
        default:
                /* do not process rejected action frames */
                if (mgmt->u.action.category & 0x80)
index 169111ad6c9947431773a39b27e57ddcc638ce63..1a4190947970b45f3f945ce7ddafceeb91f3fad9 100644 (file)
@@ -62,7 +62,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
                          bool beacon)
 {
        struct ieee80211_bss *bss;
-       int clen, srlen;
+       int clen;
        s32 signal = 0;
 
        if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
@@ -94,24 +94,23 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
        if (bss->dtim_period == 0)
                bss->dtim_period = 1;
 
-       /* replace old supported rates if we get new values */
-       srlen = 0;
+       bss->supp_rates_len = 0;
        if (elems->supp_rates) {
-               clen = IEEE80211_MAX_SUPP_RATES;
+               clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
                if (clen > elems->supp_rates_len)
                        clen = elems->supp_rates_len;
-               memcpy(bss->supp_rates, elems->supp_rates, clen);
-               srlen += clen;
+               memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
+                      clen);
+               bss->supp_rates_len += clen;
        }
        if (elems->ext_supp_rates) {
-               clen = IEEE80211_MAX_SUPP_RATES - srlen;
+               clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
                if (clen > elems->ext_supp_rates_len)
                        clen = elems->ext_supp_rates_len;
-               memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen);
-               srlen += clen;
+               memcpy(&bss->supp_rates[bss->supp_rates_len],
+                      elems->ext_supp_rates, clen);
+               bss->supp_rates_len += clen;
        }
-       if (srlen)
-               bss->supp_rates_len = srlen;
 
        bss->wmm_used = elems->wmm_param || elems->wmm_info;
 
@@ -409,16 +408,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        if (local->scan_req)
                return -EBUSY;
 
-       if (req != local->int_scan_req &&
-           sdata->vif.type == NL80211_IFTYPE_STATION &&
-           !list_empty(&ifmgd->work_list)) {
-               /* actually wait for the work it's doing to finish/time out */
-               set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
-               local->scan_req = req;
-               local->scan_sdata = sdata;
-               return 0;
-       }
-
        if (local->ops->hw_scan) {
                u8 *ies;
                int ielen;
@@ -439,6 +428,14 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        local->scan_req = req;
        local->scan_sdata = sdata;
 
+       if (req != local->int_scan_req &&
+           sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !list_empty(&ifmgd->work_list)) {
+               /* actually wait for the work it's doing to finish/time out */
+               set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
+               return 0;
+       }
+
        if (local->ops->hw_scan)
                __set_bit(SCAN_HW_SCANNING, &local->scanning);
        else
index b1d79046257b39137f21f6e7c961064d906fa98a..441f68e3f3790c5a0c9012927f25901643f87ce2 100644 (file)
@@ -496,8 +496,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
        struct ieee80211_hdr *hdr = (void *)tx->skb->data;
        struct ieee80211_supported_band *sband;
        struct ieee80211_rate *rate;
-       int i;
-       u32 len;
+       int i, len;
        bool inval = false, rts = false, short_preamble = false;
        struct ieee80211_tx_rate_control txrc;
        u32 sta_flags;
@@ -506,7 +505,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 
        sband = tx->local->hw.wiphy->bands[tx->channel->band];
 
-       len = min_t(u32, tx->skb->len + FCS_LEN,
+       len = min_t(int, tx->skb->len + FCS_LEN,
                         tx->local->hw.wiphy->frag_threshold);
 
        /* set up the tx rate control struct we give the RC algo */
@@ -1882,7 +1881,6 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
 void ieee80211_tx_pending(unsigned long data)
 {
        struct ieee80211_local *local = (struct ieee80211_local *)data;
-       struct ieee80211_sub_if_data *sdata;
        unsigned long flags;
        int i;
        bool txok;
@@ -1923,11 +1921,6 @@ void ieee80211_tx_pending(unsigned long data)
                        if (!txok)
                                break;
                }
-
-               if (skb_queue_empty(&local->pending[i]))
-                       list_for_each_entry_rcu(sdata, &local->interfaces, list)
-                               netif_tx_wake_queue(
-                                       netdev_get_tx_queue(sdata->dev, i));
        }
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
index 31b10850905fced1908ae098ad94dc62a911c3b9..553cffe9ab37045acf717e6b11022bc0381fc0e3 100644 (file)
@@ -280,13 +280,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
                /* someone still has this queue stopped */
                return;
 
-       if (skb_queue_empty(&local->pending[queue])) {
-               rcu_read_lock();
-               list_for_each_entry_rcu(sdata, &local->interfaces, list)
-                       netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
-               rcu_read_unlock();
-       } else
+       if (!skb_queue_empty(&local->pending[queue]))
                tasklet_schedule(&local->tx_pending_tasklet);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sdata, &local->interfaces, list)
+               netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
+       rcu_read_unlock();
 }
 
 void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -1137,14 +1137,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                }
        }
 
-       rcu_read_lock();
-       if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
-               list_for_each_entry_rcu(sta, &local->sta_list, list) {
-                       ieee80211_sta_tear_down_BA_sessions(sta);
-               }
-       }
-       rcu_read_unlock();
-
        /* add back keys */
        list_for_each_entry(sdata, &local->interfaces, list)
                if (netif_running(sdata->dev))
index 95682e56902398486bec7fe20432db6c98cc9535..27c30cf933daa9092def2dfc342f8f05ef036d5f 100644 (file)
@@ -146,7 +146,6 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
        hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
 
        ct_write_lock(hash);
-       spin_lock(&cp->lock);
 
        if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
                list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
@@ -159,7 +158,6 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
                ret = 0;
        }
 
-       spin_unlock(&cp->lock);
        ct_write_unlock(hash);
 
        return ret;
@@ -179,7 +177,6 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
        hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
 
        ct_write_lock(hash);
-       spin_lock(&cp->lock);
 
        if (cp->flags & IP_VS_CONN_F_HASHED) {
                list_del(&cp->c_list);
@@ -189,7 +186,6 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
        } else
                ret = 0;
 
-       spin_unlock(&cp->lock);
        ct_write_unlock(hash);
 
        return ret;
index 8df3477450c2f78d113ffad5f8e20955191bb104..1374179bc503395533e77c7344ce3a53ab80ac5b 100644 (file)
@@ -1173,8 +1173,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-               hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-                                PAGE_KERNEL);
+               hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
        }
 
        if (hash && nulls)
index 2f181aa07b0567dc0f443ed680146c5ffa6d8b07..eb0ceb8465270d355d44842b1b82e91e41b6830e 100644 (file)
@@ -173,10 +173,10 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
 
 static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
 {
-       e->index %= ip_pkt_list_tot;
        e->stamps[e->index++] = jiffies;
        if (e->index > e->nstamps)
                e->nstamps = e->index;
+       e->index %= ip_pkt_list_tot;
        list_move_tail(&e->lru_list, &t->lru_list);
 }
 
@@ -260,7 +260,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
                for (i = 0; i < e->nstamps; i++) {
                        if (info->seconds && time_after(time, e->stamps[i]))
                                continue;
-                       if (!info->hit_count || ++hits >= info->hit_count) {
+                       if (++hits >= info->hit_count) {
                                ret = !ret;
                                break;
                        }
index 5a7dcdf2b1dec4570ade1b2919b65589a5288db5..19e98007691cce17b68e3fc29896b7dc3626a526 100644 (file)
@@ -1363,7 +1363,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
-       struct sk_buff *skb, *data_skb;
+       struct sk_buff *skb, *frag __maybe_unused = NULL;
        int err;
 
        if (flags&MSG_OOB)
@@ -1375,35 +1375,45 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        if (skb == NULL)
                goto out;
 
-       data_skb = skb;
-
 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
        if (unlikely(skb_shinfo(skb)->frag_list)) {
+               bool need_compat = !!(flags & MSG_CMSG_COMPAT);
+
                /*
-                * If this skb has a frag_list, then here that means that we
-                * will have to use the frag_list skb's data for compat tasks
-                * and the regular skb's data for normal (non-compat) tasks.
+                * If this skb has a frag_list, then here that means that
+                * we will have to use the frag_list skb for compat tasks
+                * and the regular skb for non-compat tasks.
                 *
-                * If we need to send the compat skb, assign it to the
-                * 'data_skb' variable so that it will be used below for data
-                * copying. We keep 'skb' for everything else, including
-                * freeing both later.
+                * The skb might (and likely will) be cloned, so we can't
+                * just reset frag_list and go on with things -- we need to
+                * keep that. For the compat case that's easy -- simply get
+                * a reference to the compat skb and free the regular one
+                * including the frag. For the non-compat case, we need to
+                * avoid sending the frag to the user -- so assign NULL but
+                * restore it below before freeing the skb.
                 */
-               if (flags & MSG_CMSG_COMPAT)
-                       data_skb = skb_shinfo(skb)->frag_list;
+               if (need_compat) {
+                       struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
+                       skb_get(compskb);
+                       kfree_skb(skb);
+                       skb = compskb;
+               } else {
+                       frag = skb_shinfo(skb)->frag_list;
+                       skb_shinfo(skb)->frag_list = NULL;
+               }
        }
 #endif
 
        msg->msg_namelen = 0;
 
-       copied = data_skb->len;
+       copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }
 
-       skb_reset_transport_header(data_skb);
-       err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
+       skb_reset_transport_header(skb);
+       err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 
        if (msg->msg_name) {
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
@@ -1423,7 +1433,11 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        }
        siocb->scm->creds = *NETLINK_CREDS(skb);
        if (flags & MSG_TRUNC)
-               copied = data_skb->len;
+               copied = skb->len;
+
+#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
+       skb_shinfo(skb)->frag_list = frag;
+#endif
 
        skb_free_datagram(sk, skb);
 
index 519ff9d46e40fb973a192a9cec779f29ae75a286..f60c0c2aacba5f1a99fe4a2fe159f43a09b78f07 100644 (file)
@@ -67,8 +67,6 @@ static int pn_socket_create(struct net *net, struct socket *sock, int protocol)
        struct phonet_protocol *pnp;
        int err;
 
-       if (!net_eq(net, &init_net))
-               return -EAFNOSUPPORT;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
@@ -355,8 +353,6 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
        struct sockaddr_pn sa;
        u16 len;
 
-       if (!net_eq(net, &init_net))
-               goto out;
        /* check we have at least a full Phonet header */
        if (!pskb_pull(skb, sizeof(struct phonethdr)))
                goto out;
index 9cdd35e787594c2fb5fe04548a2f471d2f046cce..5f32d217535b4695667dc3e75ea3e0bcaab9a15e 100644 (file)
@@ -224,13 +224,12 @@ static void pipe_grant_credits(struct sock *sk)
 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 {
        struct pep_sock *pn = pep_sk(sk);
-       struct pnpipehdr *hdr;
+       struct pnpipehdr *hdr = pnp_hdr(skb);
        int wake = 0;
 
        if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
                return -EINVAL;
 
-       hdr = pnp_hdr(skb);
        if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
                LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
                                (unsigned)hdr->data[0]);
index 5a2275c4ee79d8ad6b4432916af37e73982c92ea..5f42f30dd1682bdc2427e7b387390fe231ab305e 100644 (file)
@@ -246,11 +246,7 @@ static struct notifier_block phonet_device_notifier = {
 /* Per-namespace Phonet devices handling */
 static int phonet_init_net(struct net *net)
 {
-       struct phonet_net *pnn;
-
-       if (!net_eq(net, &init_net))
-               return 0;
-       pnn = kmalloc(sizeof(*pnn), GFP_KERNEL);
+       struct phonet_net *pnn = kmalloc(sizeof(*pnn), GFP_KERNEL);
        if (!pnn)
                return -ENOMEM;
 
@@ -267,13 +263,9 @@ static int phonet_init_net(struct net *net)
 
 static void phonet_exit_net(struct net *net)
 {
-       struct phonet_net *pnn;
+       struct phonet_net *pnn = net_generic(net, phonet_net_id);
        struct net_device *dev;
 
-       if (!net_eq(net, &init_net))
-               return;
-       pnn = net_generic(net, phonet_net_id);
-
        rtnl_lock();
        for_each_netdev(net, dev)
                phonet_device_destroy(dev);
index 7acab1ea4fc53ded176566ca91660151a8faddc8..d21fd357661022d7b451d26ad93f43301f12b2ac 100644 (file)
@@ -68,8 +68,6 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
        int err;
        u8 pnaddr;
 
-       if (!net_eq(net, &init_net))
-               return -EOPNOTSUPP;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
@@ -126,16 +124,12 @@ nla_put_failure:
 
 static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       struct net *net = sock_net(skb->sk);
        struct phonet_device_list *pndevs;
        struct phonet_device *pnd;
        int dev_idx = 0, dev_start_idx = cb->args[0];
        int addr_idx = 0, addr_start_idx = cb->args[1];
 
-       if (!net_eq(net, &init_net))
-               goto skip;
-
-       pndevs = phonet_device_list(net);
+       pndevs = phonet_device_list(sock_net(skb->sk));
        spin_lock_bh(&pndevs->lock);
        list_for_each_entry(pnd, &pndevs->list, list) {
                u8 addr;
@@ -160,7 +154,6 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 
 out:
        spin_unlock_bh(&pndevs->lock);
-skip:
        cb->args[0] = dev_idx;
        cb->args[1] = addr_idx;
 
index b442a481a12e65b3c284bf0f1883ce5dd75246e1..36790122dfd4c3cb869960ddb2141cd235973e16 100644 (file)
@@ -56,17 +56,30 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
        unsigned long ret;
        void *addr;
 
-       addr = kmap(page);
-       if (to_user) {
+       if (to_user)
                rds_stats_add(s_copy_to_user, bytes);
-               ret = copy_to_user(ptr, addr + offset, bytes);
-       } else {
+       else
                rds_stats_add(s_copy_from_user, bytes);
-               ret = copy_from_user(addr + offset, ptr, bytes);
+
+       addr = kmap_atomic(page, KM_USER0);
+       if (to_user)
+               ret = __copy_to_user_inatomic(ptr, addr + offset, bytes);
+       else
+               ret = __copy_from_user_inatomic(addr + offset, ptr, bytes);
+       kunmap_atomic(addr, KM_USER0);
+
+       if (ret) {
+               addr = kmap(page);
+               if (to_user)
+                       ret = copy_to_user(ptr, addr + offset, bytes);
+               else
+                       ret = copy_from_user(addr + offset, ptr, bytes);
+               kunmap(page);
+               if (ret)
+                       return -EFAULT;
        }
-       kunmap(page);
 
-       return ret ? -EFAULT : 0;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(rds_page_copy_user);
 
index 6b09b941d3a77b220d88f288844aa15b7efa3362..8dc83d2caa58d02d7036ea697a5fac65f470b564 100644 (file)
@@ -447,7 +447,7 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
                goto out;
        }
 
-       if (args->nr_local > UIO_MAXIOV) {
+       if (args->nr_local > (u64)UINT_MAX) {
                ret = -EMSGSIZE;
                goto out;
        }
index 6a2654ad3628cfa103f80c40735abd8cb0782f6c..fdff33c7b432448b43d31ce76fc3fd224156f9ab 100644 (file)
@@ -296,7 +296,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
 {
        struct rds_notifier *notifier;
-       struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
+       struct rds_rdma_notify cmsg;
        unsigned int count = 0, max_messages = ~0U;
        unsigned long flags;
        LIST_HEAD(copy);
index 7d188bca2eec2f0533d67d8b6b1880fde0d91d7b..502cce76621d07abdd624798c5c76e3802db63aa 100644 (file)
@@ -677,7 +677,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
                return -EINVAL;
 
-       if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
+       if (addr->srose_ndigis > ROSE_MAX_DIGIS)
                return -EINVAL;
 
        if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -737,7 +737,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
        if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
                return -EINVAL;
 
-       if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
+       if (addr->srose_ndigis > ROSE_MAX_DIGIS)
                return -EINVAL;
 
        /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
index f9fc6ec1fef66e4c9a445da367b7a3f42a826ca7..e7f796aec657f90089a4872ab79c831b62bde467 100644 (file)
@@ -152,24 +152,21 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
+       struct tc_gact opt;
        struct tcf_gact *gact = a->priv;
-       struct tc_gact opt = {
-               .index   = gact->tcf_index,
-               .refcnt  = gact->tcf_refcnt - ref,
-               .bindcnt = gact->tcf_bindcnt - bind,
-               .action  = gact->tcf_action,
-       };
        struct tcf_t t;
 
+       opt.index = gact->tcf_index;
+       opt.refcnt = gact->tcf_refcnt - ref;
+       opt.bindcnt = gact->tcf_bindcnt - bind;
+       opt.action = gact->tcf_action;
        NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
 #ifdef CONFIG_GACT_PROB
        if (gact->tcfg_ptype) {
-               struct tc_gact_p p_opt = {
-                       .paction = gact->tcfg_paction,
-                       .pval    = gact->tcfg_pval,
-                       .ptype   = gact->tcfg_ptype,
-               };
-
+               struct tc_gact_p p_opt;
+               p_opt.paction = gact->tcfg_paction;
+               p_opt.pval = gact->tcfg_pval;
+               p_opt.ptype = gact->tcfg_ptype;
                NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
        }
 #endif
index 5e49286548383d41609ff92b51e0cc1ed2b396c4..b9aaab4e03548a848c4f9bba9b527803fa602157 100644 (file)
@@ -205,16 +205,15 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_mirred *m = a->priv;
-       struct tc_mirred opt = {
-               .index   = m->tcf_index,
-               .action  = m->tcf_action,
-               .refcnt  = m->tcf_refcnt - ref,
-               .bindcnt = m->tcf_bindcnt - bind,
-               .eaction = m->tcfm_eaction,
-               .ifindex = m->tcfm_ifindex,
-       };
+       struct tc_mirred opt;
        struct tcf_t t;
 
+       opt.index = m->tcf_index;
+       opt.action = m->tcf_action;
+       opt.refcnt = m->tcf_refcnt - ref;
+       opt.bindcnt = m->tcf_bindcnt - bind;
+       opt.eaction = m->tcfm_eaction;
+       opt.ifindex = m->tcfm_ifindex;
        NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
        t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
index 047c234bfabf83f2f1713e465f03b61296e6b125..d885ba311564d9a766fce6eac37edbaf0ed82b71 100644 (file)
@@ -240,7 +240,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
                        iph->saddr = new_addr;
 
                inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
-                                        0);
+                                        1);
                break;
        }
        default:
@@ -261,29 +261,40 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_nat *p = a->priv;
-       struct tc_nat opt = {
-               .old_addr = p->old_addr,
-               .new_addr = p->new_addr,
-               .mask     = p->mask,
-               .flags    = p->flags,
-
-               .index    = p->tcf_index,
-               .action   = p->tcf_action,
-               .refcnt   = p->tcf_refcnt - ref,
-               .bindcnt  = p->tcf_bindcnt - bind,
-       };
+       struct tc_nat *opt;
        struct tcf_t t;
+       int s;
 
-       NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
+       s = sizeof(*opt);
+
+       /* netlink spinlocks held above us - must use ATOMIC */
+       opt = kzalloc(s, GFP_ATOMIC);
+       if (unlikely(!opt))
+               return -ENOBUFS;
+
+       opt->old_addr = p->old_addr;
+       opt->new_addr = p->new_addr;
+       opt->mask = p->mask;
+       opt->flags = p->flags;
+
+       opt->index = p->tcf_index;
+       opt->action = p->tcf_action;
+       opt->refcnt = p->tcf_refcnt - ref;
+       opt->bindcnt = p->tcf_bindcnt - bind;
+
+       NLA_PUT(skb, TCA_NAT_PARMS, s, opt);
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
        NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
 
+       kfree(opt);
+
        return skb->len;
 
 nla_put_failure:
        nlmsg_trim(skb, b);
+       kfree(opt);
        return -1;
 }
 
index e02a4d04d16a61a7232075d9d9582ea285180f22..723964c3ee4ff78c4e442db790d960b261fd04df 100644 (file)
@@ -340,19 +340,22 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_police *police = a->priv;
-       struct tc_police opt = {
-               .index = police->tcf_index,
-               .action = police->tcf_action,
-               .mtu = police->tcfp_mtu,
-               .burst = police->tcfp_burst,
-               .refcnt = police->tcf_refcnt - ref,
-               .bindcnt = police->tcf_bindcnt - bind,
-       };
-
+       struct tc_police opt;
+
+       opt.index = police->tcf_index;
+       opt.action = police->tcf_action;
+       opt.mtu = police->tcfp_mtu;
+       opt.burst = police->tcfp_burst;
+       opt.refcnt = police->tcf_refcnt - ref;
+       opt.bindcnt = police->tcf_bindcnt - bind;
        if (police->tcfp_R_tab)
                opt.rate = police->tcfp_R_tab->rate;
+       else
+               memset(&opt.rate, 0, sizeof(opt.rate));
        if (police->tcfp_P_tab)
                opt.peakrate = police->tcfp_P_tab->rate;
+       else
+               memset(&opt.peakrate, 0, sizeof(opt.peakrate));
        NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
        if (police->tcfp_result)
                NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
index 41c8a445f1f23f616676466246f1b4a2141c5f37..8daa1ebc7413b971dafde75606af039d52a7e83e 100644 (file)
@@ -163,14 +163,13 @@ static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_defact *d = a->priv;
-       struct tc_defact opt = {
-               .index   = d->tcf_index,
-               .refcnt  = d->tcf_refcnt - ref,
-               .bindcnt = d->tcf_bindcnt - bind,
-               .action  = d->tcf_action,
-       };
+       struct tc_defact opt;
        struct tcf_t t;
 
+       opt.index = d->tcf_index;
+       opt.refcnt = d->tcf_refcnt - ref;
+       opt.bindcnt = d->tcf_bindcnt - bind;
+       opt.action = d->tcf_action;
        NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
        NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
        t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
index 1df16d8155ef3c431ba6aa4cefe15cf0c0f15fb7..4ab916b8074be28daea9fe9f329b4b3b460132c7 100644 (file)
@@ -147,14 +147,13 @@ static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_skbedit *d = a->priv;
-       struct tc_skbedit opt = {
-               .index   = d->tcf_index,
-               .refcnt  = d->tcf_refcnt - ref,
-               .bindcnt = d->tcf_bindcnt - bind,
-               .action  = d->tcf_action,
-       };
+       struct tc_skbedit opt;
        struct tcf_t t;
 
+       opt.index = d->tcf_index;
+       opt.refcnt = d->tcf_refcnt - ref;
+       opt.bindcnt = d->tcf_bindcnt - bind;
+       opt.action = d->tcf_action;
        NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
        if (d->flags & SKBEDIT_F_PRIORITY)
                NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
index 30280017b05505fe3630b4451cca547309c49fae..4ae6aa562f2bbfd6b90e7d40a43eba1ced694ad3 100644 (file)
@@ -330,24 +330,6 @@ void netif_carrier_off(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_carrier_off);
 
-/**
- *     netif_notify_peers - notify network peers about existence of @dev
- *     @dev: network device
- *
- * Generate traffic such that interested network peers are aware of
- * @dev, such as by generating a gratuitous ARP. This may be used when
- * a device wants to inform the rest of the network about some sort of
- * reconfiguration such as a failover event or virtual machine
- * migration.
- */
-void netif_notify_peers(struct net_device *dev)
-{
-       rtnl_lock();
-       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
-       rtnl_unlock();
-}
-EXPORT_SYMBOL(netif_notify_peers);
-
 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
    under all circumstances. It is difficult to invent anything faster or
    cheaper.
index d494100956af8af2e63a029e39ecd1c638bb9619..5cbda8f1ddfd2f4025a66826cb2aa143cb9e6b06 100644 (file)
@@ -91,6 +91,7 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
        SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
                          packet, vtag);
 
+       sctp_packet_reset(packet);
        packet->vtag = vtag;
 
        if (ecn_capable && sctp_packet_empty(packet)) {
index 619f96568e8e88bea2ce22186841afe8c8986311..612dc878e05c9aaab9970d322e5ba2798f91bd6a 100644 (file)
@@ -1157,8 +1157,7 @@ SCTP_STATIC __init int sctp_init(void)
 
        /* Set the pressure threshold to be a fraction of global memory that
         * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-        * memory, with a floor of 128 pages, and a ceiling that prevents an
-        * integer overflow.
+        * memory, with a floor of 128 pages.
         * Note this initalizes the data in sctpv6_prot too
         * Unabashedly stolen from tcp_init
         */
@@ -1166,7 +1165,6 @@ SCTP_STATIC __init int sctp_init(void)
        limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
        limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
        limit = max(limit, 128UL);
-       limit = min(limit, INT_MAX * 4UL / 3 / 2);
        sysctl_sctp_mem[0] = limit / 4 * 3;
        sysctl_sctp_mem[1] = limit;
        sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2;
index 8579b4f2f24f5bd5077bdaa9b142b1e292daeb53..9d881a61ac0262e5145437a6e2116b714c4ac631 100644 (file)
@@ -107,7 +107,7 @@ static const struct sctp_paramhdr prsctp_param = {
        cpu_to_be16(sizeof(struct sctp_paramhdr)),
 };
 
-/* A helper to initialize an op error inside a
+/* A helper to initialize to initialize an op error inside a
  * provided chunk, as most cause codes will be embedded inside an
  * abort chunk.
  */
@@ -124,29 +124,6 @@ void  sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
        chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
 }
 
-/* A helper to initialize an op error inside a
- * provided chunk, as most cause codes will be embedded inside an
- * abort chunk.  Differs from sctp_init_cause in that it won't oops
- * if there isn't enough space in the op error chunk
- */
-int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
-                     size_t paylen)
-{
-       sctp_errhdr_t err;
-       __u16 len;
-
-       /* Cause code constants are now defined in network order.  */
-       err.cause = cause_code;
-       len = sizeof(sctp_errhdr_t) + paylen;
-       err.length  = htons(len);
-
-       if (skb_tailroom(chunk->skb) < len)
-               return -ENOSPC;
-       chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
-                                                    sizeof(sctp_errhdr_t),
-                                                    &err);
-       return 0;
-}
 /* 3.3.2 Initiation (INIT) (1)
  *
  * This chunk is used to initiate a SCTP association between two
@@ -1148,24 +1125,6 @@ nodata:
        return retval;
 }
 
-/* Create an Operation Error chunk of a fixed size,
- * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
- * This is a helper function to allocate an error chunk for
- * for those invalid parameter codes in which we may not want
- * to report all the errors, if the incomming chunk is large
- */
-static inline struct sctp_chunk *sctp_make_op_error_fixed(
-       const struct sctp_association *asoc,
-       const struct sctp_chunk *chunk)
-{
-       size_t size = asoc ? asoc->pathmtu : 0;
-
-       if (!size)
-               size = SCTP_DEFAULT_MAXSEGMENT;
-
-       return sctp_make_op_error_space(asoc, chunk, size);
-}
-
 /* Create an Operation Error chunk.  */
 struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
                                 const struct sctp_chunk *chunk,
@@ -1406,18 +1365,6 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
        return target;
 }
 
-/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
- * space in the chunk
- */
-void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
-                            int len, const void *data)
-{
-       if (skb_tailroom(chunk->skb) >= len)
-               return sctp_addto_chunk(chunk, len, data);
-       else
-               return NULL;
-}
-
 /* Append bytes from user space to the end of a chunk.  Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -2021,12 +1968,13 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
                 * returning multiple unknown parameters.
                 */
                if (NULL == *errp)
-                       *errp = sctp_make_op_error_fixed(asoc, chunk);
+                       *errp = sctp_make_op_error_space(asoc, chunk,
+                                       ntohs(chunk->chunk_hdr->length));
 
                if (*errp) {
-                       sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+                       sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
                                        WORD_ROUND(ntohs(param.p->length)));
-                       sctp_addto_chunk_fixed(*errp,
+                       sctp_addto_chunk(*errp,
                                        WORD_ROUND(ntohs(param.p->length)),
                                        param.v);
                } else {
index caff26a3653861f3a4bdd28a5a47656e83db34eb..ed84099da26d0e03f1b249082ad38dd9ee9b86c0 100644 (file)
@@ -1687,8 +1687,6 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
        struct iovec iov;
        int fput_needed;
 
-       if (len > INT_MAX)
-               len = INT_MAX;
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
@@ -1746,8 +1744,6 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
        int err, err2;
        int fput_needed;
 
-       if (size > INT_MAX)
-               size = INT_MAX;
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
index 4c327008d0d11a454e73c07d8fd4584d5ed84e04..9c5a19d9dbcea17569acee79b7c51990270a712a 100644 (file)
@@ -717,18 +717,17 @@ gss_pipe_release(struct inode *inode)
        struct rpc_inode *rpci = RPC_I(inode);
        struct gss_upcall_msg *gss_msg;
 
-restart:
        spin_lock(&inode->i_lock);
-       list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
+       while (!list_empty(&rpci->in_downcall)) {
 
-               if (!list_empty(&gss_msg->msg.list))
-                       continue;
+               gss_msg = list_entry(rpci->in_downcall.next,
+                               struct gss_upcall_msg, list);
                gss_msg->msg.errno = -EPIPE;
                atomic_inc(&gss_msg->count);
                __gss_unhash_msg(gss_msg);
                spin_unlock(&inode->i_lock);
                gss_release_msg(gss_msg);
-               goto restart;
+               spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
 
@@ -1274,8 +1273,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
        rqstp->rq_release_snd_buf = priv_release_snd_buf;
        return 0;
 out_free:
-       rqstp->rq_enc_pages_num = i;
-       priv_release_snd_buf(rqstp);
+       for (i--; i >= 0; i--) {
+               __free_page(rqstp->rq_enc_pages[i]);
+       }
 out:
        return -EAGAIN;
 }
index ea1e6de3d6d9a54e68cffc431d0c3e8828d94964..49278f830367eec97d34e862992d6f7f16e1e899 100644 (file)
@@ -47,7 +47,7 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
                return;
        do {
                msg = list_entry(head->next, struct rpc_pipe_msg, list);
-               list_del_init(&msg->list);
+               list_del(&msg->list);
                msg->errno = err;
                destroy_msg(msg);
        } while (!list_empty(head));
@@ -207,7 +207,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
        if (msg != NULL) {
                spin_lock(&inode->i_lock);
                msg->errno = -EAGAIN;
-               list_del_init(&msg->list);
+               list_del(&msg->list);
                spin_unlock(&inode->i_lock);
                rpci->ops->destroy_msg(msg);
        }
@@ -267,7 +267,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
        if (res < 0 || msg->len == msg->copied) {
                filp->private_data = NULL;
                spin_lock(&inode->i_lock);
-               list_del_init(&msg->list);
+               list_del(&msg->list);
                spin_unlock(&inode->i_lock);
                rpci->ops->destroy_msg(msg);
        }
@@ -587,8 +587,6 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
        struct dentry *dentry;
 
        dentry = __rpc_lookup_create(parent, name);
-       if (IS_ERR(dentry))
-               return dentry;
        if (dentry->d_inode == NULL)
                return dentry;
        dput(dentry);
index 3fbd6ba788e5c6b3e3ebc8252e059cb9972e87ac..0266ccada52af7eabbc833cc912c75f63cae49c4 100644 (file)
@@ -711,10 +711,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
        spin_unlock_bh(&pool->sp_lock);
 
        len = 0;
-       if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
-               dprintk("svc_recv: found XPT_CLOSE\n");
-               svc_delete_xprt(xprt);
-       } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+       if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
+           !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
                struct svc_xprt *newxpt;
                newxpt = xprt->xpt_ops->xpo_accept(xprt);
                if (newxpt) {
@@ -740,7 +738,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                        svc_xprt_received(newxpt);
                }
                svc_xprt_received(xprt);
-       } else {
+       } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
                dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
                        rqstp, pool->sp_id, xprt,
                        atomic_read(&xprt->xpt_ref.refcount));
@@ -753,6 +751,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                dprintk("svc: got len=%d\n", len);
        }
 
+       if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+               dprintk("svc_recv: found XPT_CLOSE\n");
+               svc_delete_xprt(xprt);
+       }
+
        /* No data, incomplete (TCP) read, or accept() */
        if (len == 0 || len == -EAGAIN) {
                rqstp->rq_res.len = 0;
@@ -898,8 +901,11 @@ void svc_delete_xprt(struct svc_xprt *xprt)
        if (test_bit(XPT_TEMP, &xprt->xpt_flags))
                serv->sv_tmpcnt--;
 
-       while ((dr = svc_deferred_dequeue(xprt)) != NULL)
+       for (dr = svc_deferred_dequeue(xprt); dr;
+            dr = svc_deferred_dequeue(xprt)) {
+               svc_xprt_put(xprt);
                kfree(dr);
+       }
 
        svc_xprt_put(xprt);
        spin_unlock_bh(&serv->sv_lock);
index 97cc3de7432e83702ae7913cdf4eb5d93bd4594f..117f68a8aa40c2d220b0dbd0ab3fda6ed6a6d523 100644 (file)
@@ -655,25 +655,23 @@ static struct unix_gid *unix_gid_lookup(uid_t uid)
                return NULL;
 }
 
-static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
+static int unix_gid_find(uid_t uid, struct group_info **gip,
+                        struct svc_rqst *rqstp)
 {
-       struct unix_gid *ug;
-       struct group_info *gi;
-       int ret;
-
-       ug = unix_gid_lookup(uid);
+       struct unix_gid *ug = unix_gid_lookup(uid);
        if (!ug)
-               return ERR_PTR(-EAGAIN);
-       ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
-       switch (ret) {
+               return -EAGAIN;
+       switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) {
        case -ENOENT:
-               return ERR_PTR(-ENOENT);
+               *gip = NULL;
+               return 0;
        case 0:
-               gi = get_group_info(ug->gi);
+               *gip = ug->gi;
+               get_group_info(*gip);
                cache_put(&ug->h, &unix_gid_cache);
-               return gi;
+               return 0;
        default:
-               return ERR_PTR(-EAGAIN);
+               return -EAGAIN;
        }
 }
 
@@ -683,8 +681,6 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
        struct sockaddr_in *sin;
        struct sockaddr_in6 *sin6, sin6_storage;
        struct ip_map *ipm;
-       struct group_info *gi;
-       struct svc_cred *cred = &rqstp->rq_cred;
 
        switch (rqstp->rq_addr.ss_family) {
        case AF_INET:
@@ -726,17 +722,6 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
                        ip_map_cached_put(rqstp, ipm);
                        break;
        }
-
-       gi = unix_gid_find(cred->cr_uid, rqstp);
-       switch (PTR_ERR(gi)) {
-       case -EAGAIN:
-               return SVC_DROP;
-       case -ENOENT:
-               break;
-       default:
-               put_group_info(cred->cr_group_info);
-               cred->cr_group_info = gi;
-       }
        return SVC_OK;
 }
 
@@ -833,11 +818,19 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
        slen = svc_getnl(argv);                 /* gids length */
        if (slen > 16 || (len -= (slen + 2)*4) < 0)
                goto badcred;
-       cred->cr_group_info = groups_alloc(slen);
-       if (cred->cr_group_info == NULL)
+       if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp)
+           == -EAGAIN)
                return SVC_DROP;
-       for (i = 0; i < slen; i++)
-               GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
+       if (cred->cr_group_info == NULL) {
+               cred->cr_group_info = groups_alloc(slen);
+               if (cred->cr_group_info == NULL)
+                       return SVC_DROP;
+               for (i = 0; i < slen; i++)
+                       GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
+       } else {
+               for (i = 0; i < slen ; i++)
+                       svc_getnl(argv);
+       }
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
index 70b0a227dca42bee16f94c971114f3753d52aee6..1c246a4f491e9ee5126a8bace259b32a91955421 100644 (file)
@@ -968,7 +968,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
        return len;
  err_delete:
        set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
-       svc_xprt_received(&svsk->sk_xprt);
  err_again:
        return -EAGAIN;
 }
index b6fcf68dc35cdd3a2fd08942151c5771338f05cb..37c5475ba258b51b22c6722aeb605741c2732051 100644 (file)
@@ -238,8 +238,7 @@ struct sock_xprt {
         * State of TCP reply receive
         */
        __be32                  tcp_fraghdr,
-                               tcp_xid,
-                               tcp_calldir;
+                               tcp_xid;
 
        u32                     tcp_offset,
                                tcp_reclen;
@@ -962,7 +961,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
 {
        size_t len, used;
        u32 offset;
-       char *p;
+       __be32  calldir;
 
        /*
         * We want transport->tcp_offset to be 8 at the end of this routine
@@ -971,33 +970,26 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
         * transport->tcp_offset is 4 (after having already read the xid).
         */
        offset = transport->tcp_offset - sizeof(transport->tcp_xid);
-       len = sizeof(transport->tcp_calldir) - offset;
+       len = sizeof(calldir) - offset;
        dprintk("RPC:       reading CALL/REPLY flag (%Zu bytes)\n", len);
-       p = ((char *) &transport->tcp_calldir) + offset;
-       used = xdr_skb_read_bits(desc, p, len);
+       used = xdr_skb_read_bits(desc, &calldir, len);
        transport->tcp_offset += used;
        if (used != len)
                return;
        transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
+       transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+       transport->tcp_flags |= TCP_RCV_COPY_DATA;
        /*
         * We don't yet have the XDR buffer, so we will write the calldir
         * out after we get the buffer from the 'struct rpc_rqst'
         */
-       switch (ntohl(transport->tcp_calldir)) {
-       case RPC_REPLY:
-               transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
-               transport->tcp_flags |= TCP_RCV_COPY_DATA;
+       if (ntohl(calldir) == RPC_REPLY)
                transport->tcp_flags |= TCP_RPC_REPLY;
-               break;
-       case RPC_CALL:
-               transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
-               transport->tcp_flags |= TCP_RCV_COPY_DATA;
+       else
                transport->tcp_flags &= ~TCP_RPC_REPLY;
-               break;
-       default:
-               dprintk("RPC:       invalid request message type\n");
-               xprt_force_disconnect(&transport->xprt);
-       }
+       dprintk("RPC:       reading %s CALL/REPLY flag %08x\n",
+                       (transport->tcp_flags & TCP_RPC_REPLY) ?
+                               "reply for" : "request with", calldir);
        xs_tcp_check_fraghdr(transport);
 }
 
@@ -1017,10 +1009,12 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
                /*
                 * Save the RPC direction in the XDR buffer
                 */
+               __be32  calldir = transport->tcp_flags & TCP_RPC_REPLY ?
+                                       htonl(RPC_REPLY) : 0;
+
                memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
-                       &transport->tcp_calldir,
-                       sizeof(transport->tcp_calldir));
-               transport->tcp_copied += sizeof(transport->tcp_calldir);
+                       &calldir, sizeof(calldir));
+               transport->tcp_copied += sizeof(calldir);
                transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
        }
 
@@ -1932,11 +1926,6 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
        case -EALREADY:
                xprt_clear_connecting(xprt);
                return;
-       case -EINVAL:
-               /* Happens, for instance, if the user specified a link
-                * local IPv6 address without a scope-id.
-                */
-               goto out;
        }
 out_eagain:
        status = -EAGAIN;
index 78091375ca120988ee81e4a1841bc4176103837f..327011fcc407941721d0e207f0e69b1f81c2b162 100644 (file)
 
 #define MAX_ADDR_STR 32
 
-static struct media media_list[MAX_MEDIA];
+static struct media *media_list = NULL;
 static u32 media_count = 0;
 
-struct bearer tipc_bearers[MAX_BEARERS];
+struct bearer *tipc_bearers = NULL;
 
 /**
  * media_name_valid - validate media name
@@ -108,11 +108,9 @@ int  tipc_register_media(u32 media_type,
        int res = -EINVAL;
 
        write_lock_bh(&tipc_net_lock);
-
-       if (tipc_mode != TIPC_NET_MODE) {
-               warn("Media <%s> rejected, not in networked mode yet\n", name);
+       if (!media_list)
                goto exit;
-       }
+
        if (!media_name_valid(name)) {
                warn("Media <%s> rejected, illegal name\n", name);
                goto exit;
@@ -662,10 +660,33 @@ int tipc_disable_bearer(const char *name)
 
 
 
+int tipc_bearer_init(void)
+{
+       int res;
+
+       write_lock_bh(&tipc_net_lock);
+       tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
+       media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
+       if (tipc_bearers && media_list) {
+               res = 0;
+       } else {
+               kfree(tipc_bearers);
+               kfree(media_list);
+               tipc_bearers = NULL;
+               media_list = NULL;
+               res = -ENOMEM;
+       }
+       write_unlock_bh(&tipc_net_lock);
+       return res;
+}
+
 void tipc_bearer_stop(void)
 {
        u32 i;
 
+       if (!tipc_bearers)
+               return;
+
        for (i = 0; i < MAX_BEARERS; i++) {
                if (tipc_bearers[i].active)
                        tipc_bearers[i].publ.blocked = 1;
@@ -674,6 +695,10 @@ void tipc_bearer_stop(void)
                if (tipc_bearers[i].active)
                        bearer_disable(tipc_bearers[i].publ.name);
        }
+       kfree(tipc_bearers);
+       kfree(media_list);
+       tipc_bearers = NULL;
+       media_list = NULL;
        media_count = 0;
 }
 
index 000228e93f9ef75e3dd008b4b76b4a9f56b963c6..ca5734892713d19f9d6cc37b1fc48b684cc5d539 100644 (file)
@@ -114,7 +114,7 @@ struct bearer_name {
 
 struct link;
 
-extern struct bearer tipc_bearers[];
+extern struct bearer *tipc_bearers;
 
 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
index f25b1cdb64eb069cba0bf991df72930248ba371c..7906608bf510f4d6eec420e739256e8658647be6 100644 (file)
 */
 
 DEFINE_RWLOCK(tipc_net_lock);
-struct _zone *tipc_zones[256] = { NULL, };
-struct network tipc_net = { tipc_zones };
+struct network tipc_net = { NULL };
 
 struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
 {
@@ -159,12 +158,28 @@ void tipc_net_send_external_routes(u32 dest)
        }
 }
 
+static int net_init(void)
+{
+       memset(&tipc_net, 0, sizeof(tipc_net));
+       tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
+       if (!tipc_net.zones) {
+               return -ENOMEM;
+       }
+       return 0;
+}
+
 static void net_stop(void)
 {
        u32 z_num;
 
-       for (z_num = 1; z_num <= tipc_max_zones; z_num++)
+       if (!tipc_net.zones)
+               return;
+
+       for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
                tipc_zone_delete(tipc_net.zones[z_num]);
+       }
+       kfree(tipc_net.zones);
+       tipc_net.zones = NULL;
 }
 
 static void net_route_named_msg(struct sk_buff *buf)
@@ -267,7 +282,9 @@ int tipc_net_start(u32 addr)
        tipc_named_reinit();
        tipc_port_reinit();
 
-       if ((res = tipc_cltr_init()) ||
+       if ((res = tipc_bearer_init()) ||
+           (res = net_init()) ||
+           (res = tipc_cltr_init()) ||
            (res = tipc_bclink_init())) {
                return res;
        }
index 065dc6677e90e606205112d0aa846b549c4e2284..fc820cd7545309645c018da6a29d170bc67ad0f2 100644 (file)
@@ -671,7 +671,6 @@ static int unix_autobind(struct socket *sock)
        static u32 ordernum = 1;
        struct unix_address *addr;
        int err;
-       unsigned int retries = 0;
 
        mutex_lock(&u->readlock);
 
@@ -697,17 +696,9 @@ retry:
        if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
                                      addr->hash)) {
                spin_unlock(&unix_table_lock);
-               /*
-                * __unix_find_socket_byname() may take long time if many names
-                * are already in use.
-                */
-               cond_resched();
-               /* Give up if all names seems to be in use. */
-               if (retries++ == 0xFFFFF) {
-                       err = -ENOSPC;
-                       kfree(addr);
-                       goto out;
-               }
+               /* Sanity yield. It is unusual case, but yet... */
+               if (!(ordernum&0xFF))
+                       yield();
                goto retry;
        }
        addr->hash ^= sk->sk_type;
index 376798fd6b16dda652711cc164d5817ff0648520..68b321997d4cbe6dbe764f9b20b191135f9aed3f 100644 (file)
@@ -385,8 +385,6 @@ int rdev_set_freq(struct cfg80211_registered_device *rdev,
                  struct wireless_dev *for_wdev,
                  int freq, enum nl80211_channel_type channel_type);
 
-u16 cfg80211_calculate_bitrate(struct rate_info *rate);
-
 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
 #define CFG80211_DEV_WARN_ON(cond)     WARN_ON(cond)
 #else
index ec9a9d40a6c667ce3b8f5f4ed168508c98d457eb..0d862482def8e1685549d083263a8fab6187950a 100644 (file)
@@ -43,10 +43,10 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
                }
        }
 
-       if (done) {
-               nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
-               cfg80211_sme_rx_auth(dev, buf, len);
-       }
+       WARN_ON(!done);
+
+       nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
+       cfg80211_sme_rx_auth(dev, buf, len);
 
        wdev_unlock(wdev);
 }
index b75e718e1ad1467fbb8332c7289f3983fad9ef1a..ca3c92a0a14f26898c5e07a137a54393e1279fdd 100644 (file)
@@ -1562,6 +1562,39 @@ static int parse_station_flags(struct genl_info *info,
        return 0;
 }
 
+static u16 nl80211_calculate_bitrate(struct rate_info *rate)
+{
+       int modulation, streams, bitrate;
+
+       if (!(rate->flags & RATE_INFO_FLAGS_MCS))
+               return rate->legacy;
+
+       /* the formula below does only work for MCS values smaller than 32 */
+       if (rate->mcs >= 32)
+               return 0;
+
+       modulation = rate->mcs & 7;
+       streams = (rate->mcs >> 3) + 1;
+
+       bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
+                       13500000 : 6500000;
+
+       if (modulation < 4)
+               bitrate *= (modulation + 1);
+       else if (modulation == 4)
+               bitrate *= (modulation + 2);
+       else
+               bitrate *= (modulation + 3);
+
+       bitrate *= streams;
+
+       if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+               bitrate = (bitrate / 9) * 10;
+
+       /* do NOT round down here */
+       return (bitrate + 50000) / 100000;
+}
+
 static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
                                int flags, struct net_device *dev,
                                u8 *mac_addr, struct station_info *sinfo)
@@ -1608,8 +1641,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
                if (!txrate)
                        goto nla_put_failure;
 
-               /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
-               bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
+               /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */
+               bitrate = nl80211_calculate_bitrate(&sinfo->txrate);
                if (bitrate > 0)
                        NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
 
index 91ec925ebdbfb48f6cadb6043e3616e2477a0362..e5f92ee758f44226999112603a4f3d59a23f222f 100644 (file)
@@ -270,7 +270,6 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
 {
        struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
        struct cfg80211_internal_bss *bss, *res = NULL;
-       unsigned long now = jiffies;
 
        spin_lock_bh(&dev->bss_lock);
 
@@ -279,10 +278,6 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
                        continue;
                if (channel && bss->pub.channel != channel)
                        continue;
-               /* Don't get expired BSS structs */
-               if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) &&
-                   !atomic_read(&bss->hold))
-                       continue;
                if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
                        res = bss;
                        kref_get(&res->ref);
index a6a38b17645fda302831ce1817db16fcd6b44b90..3fc2df86278fcf7390c5d2b3cce484a7abb04f4d 100644 (file)
@@ -682,36 +682,3 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 
        return err;
 }
-
-u16 cfg80211_calculate_bitrate(struct rate_info *rate)
-{
-       int modulation, streams, bitrate;
-
-       if (!(rate->flags & RATE_INFO_FLAGS_MCS))
-               return rate->legacy;
-
-       /* the formula below does only work for MCS values smaller than 32 */
-       if (rate->mcs >= 32)
-               return 0;
-
-       modulation = rate->mcs & 7;
-       streams = (rate->mcs >> 3) + 1;
-
-       bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
-                       13500000 : 6500000;
-
-       if (modulation < 4)
-               bitrate *= (modulation + 1);
-       else if (modulation == 4)
-               bitrate *= (modulation + 2);
-       else
-               bitrate *= (modulation + 3);
-
-       bitrate *= streams;
-
-       if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
-               bitrate = (bitrate / 9) * 10;
-
-       /* do NOT round down here */
-       return (bitrate + 50000) / 100000;
-}
index 62cfc0cf55e2fee476b8cae97ddd2f0bf19171a0..561a45cf2a6af562784113cedfd0952108895f9e 100644 (file)
@@ -1227,7 +1227,10 @@ int cfg80211_wext_giwrate(struct net_device *dev,
        if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
                return -EOPNOTSUPP;
 
-       rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
+       rate->value = 0;
+
+       if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
+               rate->value = 100000 * sinfo.txrate.legacy;
 
        return 0;
 }
@@ -1358,9 +1361,6 @@ int cfg80211_wext_giwessid(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
 
-       data->flags = 0;
-       data->length = 0;
-
        switch (wdev->iftype) {
        case NL80211_IFTYPE_ADHOC:
                return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
index a2e4c60ac23904da29d40fa05f074dcd490e5618..60fe57761ca94bb5d9faf0638c00d373618301e2 100644 (file)
@@ -854,22 +854,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
                }
        }
 
-       if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
-               /*
-                * If this is a GET, but not NOMAX, it means that the extra
-                * data is not bounded by userspace, but by max_tokens. Thus
-                * set the length to max_tokens. This matches the extra data
-                * allocation.
-                * The driver should fill it with the number of tokens it
-                * provided, and it may check iwp->length rather than having
-                * knowledge of max_tokens. If the driver doesn't change the
-                * iwp->length, this ioctl just copies back max_token tokens
-                * filled with zeroes. Hopefully the driver isn't claiming
-                * them to be valid data.
-                */
-               iwp->length = descr->max_tokens;
-       }
-
        err = handler(dev, info, (union iwreq_data *) iwp, extra);
 
        iwp->length += essid_compat;
@@ -1029,7 +1013,7 @@ static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd,
        } else if (!iwp->pointer)
                return -EFAULT;
 
-       extra = kzalloc(extra_size, GFP_KERNEL);
+       extra = kmalloc(extra_size, GFP_KERNEL);
        if (!extra)
                return -ENOMEM;
 
index d00681646d1de54b31101ae8bbab2a0a209cc4fa..7fa9c7ad3d3b5b7efbe4c612511425628900a138 100644 (file)
@@ -81,41 +81,6 @@ struct compat_x25_subscrip_struct {
 };
 #endif
 
-
-int x25_parse_address_block(struct sk_buff *skb,
-               struct x25_address *called_addr,
-               struct x25_address *calling_addr)
-{
-       unsigned char len;
-       int needed;
-       int rc;
-
-       if (skb->len < 1) {
-               /* packet has no address block */
-               rc = 0;
-               goto empty;
-       }
-
-       len = *skb->data;
-       needed = 1 + (len >> 4) + (len & 0x0f);
-
-       if (skb->len < needed) {
-               /* packet is too short to hold the addresses it claims
-                  to hold */
-               rc = -1;
-               goto empty;
-       }
-
-       return x25_addr_ntoa(skb->data, called_addr, calling_addr);
-
-empty:
-       *called_addr->x25_addr = 0;
-       *calling_addr->x25_addr = 0;
-
-       return rc;
-}
-
-
 int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
                  struct x25_address *calling_addr)
 {
@@ -906,26 +871,16 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
        /*
         *      Extract the X.25 addresses and convert them to ASCII strings,
         *      and remove them.
-        *
-        *      Address block is mandatory in call request packets
         */
-       addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
-       if (addr_len <= 0)
-               goto out_clear_request;
+       addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
        skb_pull(skb, addr_len);
 
        /*
         *      Get the length of the facilities, skip past them for the moment
         *      get the call user data because this is needed to determine
         *      the correct listener
-        *
-        *      Facilities length is mandatory in call request packets
         */
-       if (skb->len < 1)
-               goto out_clear_request;
        len = skb->data[0] + 1;
-       if (skb->len < len)
-               goto out_clear_request;
        skb_pull(skb,len);
 
        /*
index 804afd3a1f7a3c1833c914f6c9c6addde04dbd57..a21f6646eb3a85cd1275708ba53960ed085402da 100644 (file)
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
 {
        unsigned char *p = skb->data;
-       unsigned int len;
+       unsigned int len = *p++;
 
        *vc_fac_mask = 0;
 
@@ -50,19 +50,9 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
        memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
        memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
 
-       if (skb->len < 1)
-               return 0;
-
-       len = *p++;
-
-       if (len >= skb->len)
-               return -1;
-
        while (len > 0) {
                switch (*p & X25_FAC_CLASS_MASK) {
                case X25_FAC_CLASS_A:
-                       if (len < 2)
-                               return 0;
                        switch (*p) {
                        case X25_FAC_REVERSE:
                                if((p[1] & 0x81) == 0x81) {
@@ -106,8 +96,6 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                        len -= 2;
                        break;
                case X25_FAC_CLASS_B:
-                       if (len < 3)
-                               return 0;
                        switch (*p) {
                        case X25_FAC_PACKET_SIZE:
                                facilities->pacsize_in  = p[1];
@@ -129,8 +117,6 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                        len -= 3;
                        break;
                case X25_FAC_CLASS_C:
-                       if (len < 4)
-                               return 0;
                        printk(KERN_DEBUG "X.25: unknown facility %02X, "
                               "values %02X, %02X, %02X\n",
                               p[0], p[1], p[2], p[3]);
@@ -138,26 +124,26 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                        len -= 4;
                        break;
                case X25_FAC_CLASS_D:
-                       if (len < p[1] + 2)
-                               return 0;
                        switch (*p) {
                        case X25_FAC_CALLING_AE:
-                               if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-                                       return 0;
+                               if (p[1] > X25_MAX_DTE_FACIL_LEN)
+                                       break;
                                dte_facs->calling_len = p[2];
                                memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
                                *vc_fac_mask |= X25_MASK_CALLING_AE;
                                break;
                        case X25_FAC_CALLED_AE:
-                               if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-                                       return 0;
+                               if (p[1] > X25_MAX_DTE_FACIL_LEN)
+                                       break;
                                dte_facs->called_len = p[2];
                                memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
                                *vc_fac_mask |= X25_MASK_CALLED_AE;
                                break;
                        default:
                                printk(KERN_DEBUG "X.25: unknown facility %02X,"
-                                       "length %d\n", p[0], p[1]);
+                                       "length %d, values %02X, %02X, "
+                                       "%02X, %02X\n",
+                                       p[0], p[1], p[2], p[3], p[4], p[5]);
                                break;
                        }
                        len -= p[1] + 2;
@@ -261,8 +247,6 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
        memcpy(new, ours, sizeof(*new));
 
        len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
-       if (len < 0)
-               return len;
 
        /*
         *      They want reverse charging, we won't accept it.
index 88d7652cf166be30c583e40ff3c81a923478447b..7d7c3abf38b573fa64f1bb35026dc5bf80cdcd3c 100644 (file)
@@ -89,7 +89,6 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
 static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
 {
        struct x25_address source_addr, dest_addr;
-       int len;
 
        switch (frametype) {
                case X25_CALL_ACCEPTED: {
@@ -107,19 +106,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
                         *      Parse the data in the frame.
                         */
                        skb_pull(skb, X25_STD_MIN_LEN);
-
-                       len = x25_parse_address_block(skb, &source_addr,
-                                               &dest_addr);
-                       if (len > 0)
-                               skb_pull(skb, len);
-
-                       len = x25_parse_facilities(skb, &x25->facilities,
+                       skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
+                       skb_pull(skb,
+                                x25_parse_facilities(skb, &x25->facilities,
                                                &x25->dte_facilities,
-                                               &x25->vc_facil_mask);
-                       if (len > 0)
-                               skb_pull(skb, len);
-                       else
-                               return -1;
+                                               &x25->vc_facil_mask));
                        /*
                         *      Copy any Call User Data.
                         */
index 5325423ceab483833c8f09efb37a336aaa8f9588..67d59c7a18dc57b1580a833ed37e832eff227c85 100644 (file)
@@ -44,9 +44,7 @@ all:
 
 Makefile:;
 
-\$(all): all
+\$(all) %/: all
        @:
 
-%/: all
-       @:
 EOF
index 03efeabdfdb9ffebf1b2969e156b07346ebd6a9f..801a16a1754532cee8f647ef178772209973120d 100644 (file)
@@ -1311,7 +1311,7 @@ static unsigned int *reloc_location(struct elf_info *elf,
        int section = sechdr->sh_info;
 
        return (void *)elf->hdr + sechdrs[section].sh_offset +
-               r->r_offset - sechdrs[section].sh_addr;
+               (r->r_offset - sechdrs[section].sh_addr);
 }
 
 static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
index 3d78d69a629bcd5bbafc644bb3cca0a513e39565..f7496c6a022b7c2213f061c17b435bf0328085ed 100644 (file)
@@ -168,13 +168,13 @@ static int create_by_name(const char *name, mode_t mode,
 
        mutex_lock(&parent->d_inode->i_mutex);
        *dentry = lookup_one_len(name, parent, strlen(name));
-       if (!IS_ERR(*dentry)) {
+       if (!IS_ERR(dentry)) {
                if ((mode & S_IFMT) == S_IFDIR)
                        error = mkdir(parent->d_inode, *dentry, mode);
                else
                        error = create(parent->d_inode, *dentry, mode);
        } else
-               error = PTR_ERR(*dentry);
+               error = PTR_ERR(dentry);
        mutex_unlock(&parent->d_inode->i_mutex);
 
        return error;
index b0bd910b1de7e0a4dc4e4db20dd51fec7118310e..1cad4c7b1c337ba9e3a4db0066a7776937f12c44 100644 (file)
@@ -1259,7 +1259,6 @@ long keyctl_session_to_parent(void)
        keyring_r = NULL;
 
        me = current;
-       rcu_read_lock();
        write_lock_irq(&tasklist_lock);
 
        parent = me->real_parent;
@@ -1292,8 +1291,7 @@ long keyctl_session_to_parent(void)
                goto not_permitted;
 
        /* the keyrings must have the same UID */
-       if ((pcred->tgcred->session_keyring &&
-            pcred->tgcred->session_keyring->uid != mycred->euid) ||
+       if (pcred ->tgcred->session_keyring->uid != mycred->euid ||
            mycred->tgcred->session_keyring->uid != mycred->euid)
                goto not_permitted;
 
@@ -1315,7 +1313,6 @@ long keyctl_session_to_parent(void)
        set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
 
        write_unlock_irq(&tasklist_lock);
-       rcu_read_unlock();
        if (oldcred)
                put_cred(oldcred);
        return 0;
@@ -1324,7 +1321,6 @@ already_same:
        ret = 0;
 not_permitted:
        write_unlock_irq(&tasklist_lock);
-       rcu_read_unlock();
        put_cred(cred);
        return ret;
 
index e031952a49e133ed45f613ff0c06af57db3c7ec4..8ec02746ca993fa697da600757fbcd21b319e2f7 100644 (file)
@@ -524,8 +524,9 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
        struct key *keyring;
        int bucket;
 
+       keyring = ERR_PTR(-EINVAL);
        if (!name)
-               return ERR_PTR(-EINVAL);
+               goto error;
 
        bucket = keyring_hash(name);
 
@@ -552,18 +553,17 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
                                           KEY_SEARCH) < 0)
                                continue;
 
-                       /* we've got a match but we might end up racing with
-                        * key_cleanup() if the keyring is currently 'dead'
-                        * (ie. it has a zero usage count) */
-                       if (!atomic_inc_not_zero(&keyring->usage))
-                               continue;
-                       goto out;
+                       /* we've got a match */
+                       atomic_inc(&keyring->usage);
+                       read_unlock(&keyring_name_lock);
+                       goto error;
                }
        }
 
-       keyring = ERR_PTR(-ENOKEY);
-out:
        read_unlock(&keyring_name_lock);
+       keyring = ERR_PTR(-ENOKEY);
+
+ error:
        return keyring;
 
 } /* end find_keyring_by_name() */
index 931cfda6e1f9ff2c2030a04335fd4d2a47820e99..5c23afb31ece464ad0165e1a3fb052a8969244c5 100644 (file)
@@ -509,7 +509,7 @@ try_again:
 
                        ret = install_thread_keyring();
                        if (ret < 0) {
-                               key_ref = ERR_PTR(ret);
+                               key = ERR_PTR(ret);
                                goto error;
                        }
                        goto reget_creds;
@@ -527,7 +527,7 @@ try_again:
 
                        ret = install_process_keyring();
                        if (ret < 0) {
-                               key_ref = ERR_PTR(ret);
+                               key = ERR_PTR(ret);
                                goto error;
                        }
                        goto reget_creds;
@@ -586,7 +586,7 @@ try_again:
 
        case KEY_SPEC_GROUP_KEYRING:
                /* group keyrings are not yet supported */
-               key_ref = ERR_PTR(-EINVAL);
+               key = ERR_PTR(-EINVAL);
                goto error;
 
        case KEY_SPEC_REQKEY_AUTH_KEY:
index 9ac7bfd3bbdd386fedd677077f9ac834fd643c17..03fe63ed55bda1a1cfacc95138e409e8ef53f0d2 100644 (file)
@@ -336,10 +336,8 @@ static int construct_alloc_key(struct key_type *type,
 
 key_already_present:
        mutex_unlock(&key_construction_mutex);
-       if (dest_keyring) {
-               __key_link(dest_keyring, key_ref_to_ptr(key_ref));
+       if (dest_keyring)
                up_write(&dest_keyring->sem);
-       }
        mutex_unlock(&user->cons_lock);
        key_put(key);
        *_key = key = key_ref_to_ptr(key_ref);
@@ -430,11 +428,6 @@ struct key *request_key_and_link(struct key_type *type,
 
        if (!IS_ERR(key_ref)) {
                key = key_ref_to_ptr(key_ref);
-               if (dest_keyring) {
-                       construct_get_dest_keyring(&dest_keyring);
-                       key_link(dest_keyring, key);
-                       key_put(dest_keyring);
-               }
        } else if (PTR_ERR(key_ref) != -EAGAIN) {
                key = ERR_CAST(key_ref);
        } else  {
index d9f94258c10866a5bbc119010ab39e2b18905672..c844eed7915d0d270c058c16d6b3db40ffa576d0 100644 (file)
@@ -33,9 +33,6 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
 {
        int ret;
 
-       if (write && !capable(CAP_SYS_RAWIO))
-               return -EPERM;
-
        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 
        update_mmap_min_addr();
index 04b6145d767f96093423d5f733e6fe1b6a4e5687..68c7348d1acc6628f4c6121207edc34f6a7f707a 100644 (file)
@@ -128,7 +128,7 @@ int ebitmap_netlbl_export(struct ebitmap *ebmap,
                        cmap_idx = delta / NETLBL_CATMAP_MAPSIZE;
                        cmap_sft = delta % NETLBL_CATMAP_MAPSIZE;
                        c_iter->bitmap[cmap_idx]
-                               |= e_iter->maps[i] << cmap_sft;
+                               |= e_iter->maps[cmap_idx] << cmap_sft;
                }
                e_iter = e_iter->next;
        }
index 7834a5438f756b8aab0efd6fe77e93bb46820a1f..a8b7fabe645ef1d0bf7fc82f4d15e4cd7024b1ed 100644 (file)
@@ -31,7 +31,6 @@
 
 /* max number of user-defined controls */
 #define MAX_USER_CONTROLS      32
-#define MAX_CONTROL_COUNT      1028
 
 struct snd_kctl_ioctl {
        struct list_head list;          /* list of all ioctls */
@@ -191,10 +190,6 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
        
        if (snd_BUG_ON(!control || !control->count))
                return NULL;
-
-       if (control->count > MAX_CONTROL_COUNT)
-               return NULL;
-
        kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
        if (kctl == NULL) {
                snd_printk(KERN_ERR "Cannot allocate control instance\n");
index d3a81c8681ba7dd46413c52422626a102c7308e1..9a127a033536b823ceda7903f52f030afc6ac00d 100755 (executable)
@@ -314,10 +314,10 @@ int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
        if (!params->info)
                params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES;
        if (!params->fifo_size) {
-               m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
-               i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
-               if (snd_mask_min(m) == snd_mask_max(m) &&
-                    snd_interval_min(i) == snd_interval_max(i)) {
+               if (snd_mask_min(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT]) ==
+                   snd_mask_max(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT]) &&
+                    snd_mask_min(&params->masks[SNDRV_PCM_HW_PARAM_CHANNELS]) ==
+                    snd_mask_max(&params->masks[SNDRV_PCM_HW_PARAM_CHANNELS])) {
                        changed = substream->ops->ioctl(substream,
                                        SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
                        if (changed < 0)
@@ -972,10 +972,6 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
 {
        if (substream->runtime->trigger_master != substream)
                return 0;
-       /* some drivers might use hw_ptr to recover from the pause -
-          update the hw_ptr now */
-       if (push)
-               snd_pcm_update_hw_ptr(substream);
        /* The jiffies check in snd_pcm_update_hw_ptr*() is done by
         * a delta betwen the current jiffies, this gives a large enough
         * delta, effectively to skip the check once.
index e4c12a1ee30448b78df95e17aedda87832b52314..70d6f25ba526147d50148cd04fff2f9bf612d0dd 100644 (file)
@@ -530,15 +530,13 @@ static int snd_rawmidi_release(struct inode *inode, struct file *file)
 {
        struct snd_rawmidi_file *rfile;
        struct snd_rawmidi *rmidi;
-       struct module *module;
 
        rfile = file->private_data;
        rmidi = rfile->rmidi;
        rawmidi_release_priv(rfile);
        kfree(rfile);
-       module = rmidi->card->module;
        snd_card_file_remove(rmidi->card, file);
-       module_put(module);
+       module_put(rmidi->card->module);
        return 0;
 }
 
index 1f133fe4228bba74ecaf4fccef895b470650549d..d0d721c22eacca6d4379dab12f9fc4dd57238eaa 100644 (file)
@@ -280,10 +280,13 @@ snd_seq_oss_open(struct file *file, int level)
        return 0;
 
  _error:
+       snd_seq_oss_writeq_delete(dp->writeq);
+       snd_seq_oss_readq_delete(dp->readq);
        snd_seq_oss_synth_cleanup(dp);
        snd_seq_oss_midi_cleanup(dp);
-       delete_seq_queue(dp->queue);
        delete_port(dp);
+       delete_seq_queue(dp->queue);
+       kfree(dp);
 
        return rc;
 }
@@ -346,10 +349,8 @@ create_port(struct seq_oss_devinfo *dp)
 static int
 delete_port(struct seq_oss_devinfo *dp)
 {
-       if (dp->port < 0) {
-               kfree(dp);
+       if (dp->port < 0)
                return 0;
-       }
 
        debug_printk(("delete_port %i\n", dp->port));
        return snd_seq_event_port_detach(dp->cseq, dp->port);
index e7efcef7b812e382c1dc477e2b973e151431adbf..67ca4403c1c6d00a1b7cdbf172b4326be5d6cf86 100644 (file)
@@ -1867,14 +1867,12 @@ static unsigned int ad1981_jacks_blacklist[] = {
        0x10140523, /* Thinkpad R40 */
        0x10140534, /* Thinkpad X31 */
        0x10140537, /* Thinkpad T41p */
-       0x1014053e, /* Thinkpad R40e */
        0x10140554, /* Thinkpad T42p/R50p */
        0x10140567, /* Thinkpad T43p 2668-G7U */
        0x10140581, /* Thinkpad X41-2527 */
        0x10280160, /* Dell Dimension 2400 */
        0x104380b0, /* Asus A7V8X-MX */
        0x11790241, /* Toshiba Satellite A-15 S127 */
-       0x1179ff10, /* Toshiba P500 */
        0x144dc01a, /* Samsung NP-X20C004/SEG */
        0 /* end */
 };
index 78c8736638d3173b8b991ab4a7bab4d79aed3db0..ddcd4a9fd7e69f1cf63fcbc0d79adc8868c4237a 100644 (file)
@@ -941,21 +941,13 @@ static snd_pcm_uframes_t snd_cmipci_pcm_pointer(struct cmipci *cm, struct cmipci
                                                struct snd_pcm_substream *substream)
 {
        size_t ptr;
-       unsigned int reg, rem, tries;
-
+       unsigned int reg;
        if (!rec->running)
                return 0;
 #if 1 // this seems better..
        reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
-       for (tries = 0; tries < 3; tries++) {
-               rem = snd_cmipci_read_w(cm, reg);
-               if (rem < rec->dma_size)
-                       goto ok;
-       }
-       printk(KERN_ERR "cmipci: invalid PCM pointer: %#x\n", rem);
-       return SNDRV_PCM_POS_XRUN;
-ok:
-       ptr = (rec->dma_size - (rem + 1)) >> rec->shift;
+       ptr = rec->dma_size - (snd_cmipci_read_w(cm, reg) + 1);
+       ptr >>= rec->shift;
 #else
        reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
        ptr = snd_cmipci_read(cm, reg) - rec->offset;
index 641d7f07392c63edea8bcf8a9bc71957914239cd..1305f7ca02c3c2726049dc448dec6b7fa36a63cd 100644 (file)
@@ -1821,9 +1821,7 @@ static irqreturn_t snd_echo_interrupt(int irq, void *dev_id)
        /* The hardware doesn't tell us which substream caused the irq,
        thus we have to check all running substreams. */
        for (ss = 0; ss < DSP_MAXPIPES; ss++) {
-               substream = chip->substream[ss];
-               if (substream && ((struct audiopipe *)substream->runtime->
-                               private_data)->state == PIPE_STATE_STARTED) {
+               if ((substream = chip->substream[ss])) {
                        period = pcm_pointer(substream) /
                                substream->runtime->period_size;
                        if (period != chip->last_period[ss]) {
index 92626f3aab2dbc9f4c0c7b16f098056f25854ba1..168af67d938e1415993b29eb166763327ec0048b 100644 (file)
@@ -52,7 +52,6 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64};
 static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
 static int enable_ir[SNDRV_CARDS];
 static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
-static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
 
 module_param_array(index, int, NULL, 0444);
 MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
@@ -74,8 +73,6 @@ module_param_array(enable_ir, bool, NULL, 0444);
 MODULE_PARM_DESC(enable_ir, "Enable IR.");
 module_param_array(subsystem, uint, NULL, 0444);
 MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
-module_param_array(delay_pcm_irq, uint, NULL, 0444);
-MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0).");
 /*
  * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value  Model:SB0400
  */
@@ -130,7 +127,6 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
                                      &emu)) < 0)
                goto error;
        card->private_data = emu;
-       emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
        if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
                goto error;
        if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
index 622bace148e3c4e5e4de3efa1936b56245004c69..55b83ef73c630e83b2d098a45cce1f570923df7f 100644 (file)
@@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
                evoice->epcm->ccca_start_addr = start_addr + ccis;
                if (extra) {
                        start_addr += ccis;
-                       end_addr += ccis + emu->delay_pcm_irq;
+                       end_addr += ccis;
                }
                if (stereo && !extra) {
                        snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK);
@@ -360,9 +360,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
        /* Assumption that PT is already 0 so no harm overwriting */
        snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]);
        snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24));
-       snd_emu10k1_ptr_write(emu, PSST, voice,
-                       (start_addr + (extra ? emu->delay_pcm_irq : 0)) |
-                       (send_amount[2] << 24));
+       snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24));
        if (emu->card_capabilities->emu_model)
                pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */
        else 
@@ -734,23 +732,6 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_
        snd_emu10k1_ptr_write(emu, IP, voice, 0);
 }
 
-static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu,
-               struct snd_emu10k1_pcm *epcm,
-               struct snd_pcm_substream *substream,
-               struct snd_pcm_runtime *runtime)
-{
-       unsigned int ptr, period_pos;
-
-       /* try to sychronize the current position for the interrupt
-          source voice */
-       period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt;
-       period_pos %= runtime->period_size;
-       ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number);
-       ptr &= ~0x00ffffff;
-       ptr |= epcm->ccca_start_addr + period_pos;
-       snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr);
-}
-
 static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
                                        int cmd)
 {
@@ -772,8 +753,6 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
                /* follow thru */
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
        case SNDRV_PCM_TRIGGER_RESUME:
-               if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
-                       snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime);
                mix = &emu->pcm_mixer[substream->number];
                snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix);
                snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix);
@@ -890,9 +869,8 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream *
 #endif
        /*
        printk(KERN_DEBUG
-              "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n",
-              (long)ptr, (long)runtime->buffer_size,
-              (long)runtime->period_size);
+              "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n",
+              ptr, runtime->buffer_size, runtime->period_size);
        */
        return ptr;
 }
index 7c3ce5f379f2ba5b5023170748119a6b0033e763..6a47672f930aedfe67163ce56a0e5a726470e734 100644 (file)
@@ -309,10 +309,8 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
        if (snd_BUG_ON(!hdr))
                return NULL;
 
-       idx = runtime->period_size >= runtime->buffer_size ?
-                                       (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
-       blk = search_empty(emu, runtime->dma_bytes + idx);
+       blk = search_empty(emu, runtime->dma_bytes);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
index cc2a5a2a147b2f9fb656ef5a348f363f7b5ec246..fec8724b9125c50c8dc0d2f432167302e31a5ee8 100644 (file)
@@ -116,7 +116,6 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
                         "{Intel, ICH9},"
                         "{Intel, ICH10},"
                         "{Intel, PCH},"
-                        "{Intel, CPT},"
                         "{Intel, SCH},"
                         "{ATI, SB450},"
                         "{ATI, SB600},"
@@ -438,7 +437,6 @@ struct azx {
 /* driver types */
 enum {
        AZX_DRIVER_ICH,
-       AZX_DRIVER_PCH,
        AZX_DRIVER_SCH,
        AZX_DRIVER_ATI,
        AZX_DRIVER_ATIHDMI,
@@ -453,7 +451,6 @@ enum {
 
 static char *driver_short_names[] __devinitdata = {
        [AZX_DRIVER_ICH] = "HDA Intel",
-       [AZX_DRIVER_PCH] = "HDA Intel PCH",
        [AZX_DRIVER_SCH] = "HDA Intel MID",
        [AZX_DRIVER_ATI] = "HDA ATI SB",
        [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
@@ -1042,7 +1039,6 @@ static void azx_init_pci(struct azx *chip)
                                0x01, NVIDIA_HDA_ENABLE_COHBIT);
                break;
        case AZX_DRIVER_SCH:
-       case AZX_DRIVER_PCH:
                pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
                if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
                        pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
@@ -2226,25 +2222,9 @@ static int azx_dev_free(struct snd_device *device)
  * white/black-listing for position_fix
  */
 static struct snd_pci_quirk position_fix_list[] __devinitdata = {
-       SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1849, 0x0888, "775Dual-VSTA", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
        {}
 };
 
@@ -2332,7 +2312,6 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
 static struct snd_pci_quirk msi_white_list[] __devinitdata = {
        SND_PCI_QUIRK(0x103c, 0x30f7, "HP Pavilion dv4t-1300", 1),
        SND_PCI_QUIRK(0x103c, 0x3607, "HP Compa CQ40", 1),
-       SND_PCI_QUIRK(0x107b, 0x0380, "Gateway M-6866", 1),
        {}
 };
 
@@ -2349,13 +2328,6 @@ static void __devinit check_msi(struct azx *chip)
                       "hda_intel: msi for device %04x:%04x set to %d\n",
                       q->subvendor, q->subdevice, q->value);
                chip->msi = q->value;
-               return;
-       }
-
-       /* NVidia chipsets seem to cause troubles with MSI */
-       if (chip->driver_type == AZX_DRIVER_NVIDIA) {
-               printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
-               chip->msi = 0;
        }
 }
 
@@ -2405,7 +2377,6 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
        if (bdl_pos_adj[dev] < 0) {
                switch (chip->driver_type) {
                case AZX_DRIVER_ICH:
-               case AZX_DRIVER_PCH:
                        bdl_pos_adj[dev] = 1;
                        break;
                default:
@@ -2680,9 +2651,6 @@ static struct pci_device_id azx_ids[] = {
        { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
        /* PCH */
        { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
-       { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
-       /* CPT */
-       { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
        /* SCH */
        { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
        /* ATI SB 450/600 */
index 3736bc4fda599bd352a68846203c3a5bf8f520f7..2d603f6aba6319bbf149a8dc5b966aec328aba8e 100644 (file)
@@ -1003,7 +1003,7 @@ static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x81cb, "ASUS M2N", AD1986A_3STACK),
        SND_PCI_QUIRK(0x1043, 0x8234, "ASUS M2N", AD1986A_3STACK),
        SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40-10Q", AD1986A_3STACK),
+       SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD),
        SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
        SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
        SND_PCI_QUIRK(0x144d, 0xc024, "Samsung P50", AD1986A_SAMSUNG_P50),
@@ -1789,14 +1789,6 @@ static int patch_ad1981(struct hda_codec *codec)
        case AD1981_THINKPAD:
                spec->mixers[0] = ad1981_thinkpad_mixers;
                spec->input_mux = &ad1981_thinkpad_capture_source;
-               /* set the upper-limit for mixer amp to 0dB for avoiding the
-                * possible damage by overloading
-                */
-               snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
-                                         (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
-                                         (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
-                                         (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
-                                         (1 << AC_AMPCAP_MUTE_SHIFT));
                break;
        case AD1981_TOSHIBA:
                spec->mixers[0] = ad1981_hp_mixers;
@@ -3510,7 +3502,6 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = {
        /* Lenovo Thinkpad T61/X61 */
        SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
        SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
-       SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
        {}
 };
 
index 9d855f476f7a3fc38f17fa96b7d24322b577f875..905859d4f4dfdca60561e9bafd52492e8a053933 100644 (file)
@@ -1175,12 +1175,9 @@ static int patch_cxt5045(struct hda_codec *codec)
 
        switch (codec->subsystem_id >> 16) {
        case 0x103c:
-       case 0x1631:
-       case 0x1734:
-       case 0x17aa:
-               /* HP, Packard Bell, Fujitsu-Siemens & Lenovo laptops have
-                * really bad sound over 0dB on NID 0x17. Fix max PCM level to
-                * 0 dB (originally it has 0x2b steps with 0dB offset 0x14)
+               /* HP laptop has a really bad sound over 0dB on NID 0x17.
+                * Fix max PCM level to 0 dB
+                * (originall it has 0x2b steps with 0dB offset 0x14)
                 */
                snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
                                          (0x14 << AC_AMPCAP_OFFSET_SHIFT) |
@@ -1584,21 +1581,6 @@ static int patch_cxt5047(struct hda_codec *codec)
 #endif 
        }
        spec->vmaster_nid = 0x13;
-
-       switch (codec->subsystem_id >> 16) {
-       case 0x103c:
-               /* HP laptops have really bad sound over 0 dB on NID 0x10.
-                * Fix max PCM level to 0 dB (originally it has 0x1e steps
-                * with 0 dB offset 0x17)
-                */
-               snd_hda_override_amp_caps(codec, 0x10, HDA_INPUT,
-                                         (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
-                                         (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
-                                         (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
-                                         (1 << AC_AMPCAP_MUTE_SHIFT));
-               break;
-       }
-
        return 0;
 }
 
@@ -2351,8 +2333,6 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
                      CXT5066_DELL_LAPTOP),
        SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
-       SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
-       SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
        {}
 };
 
index bc91a80d817b5259f95d0630dbee1543fdbb5a28..911dd1fec22ed286feaa411d18b97d4d92204095 100644 (file)
@@ -400,8 +400,6 @@ static int alc_mux_enum_info(struct snd_kcontrol *kcontrol,
        unsigned int mux_idx = snd_ctl_get_ioffidx(kcontrol, &uinfo->id);
        if (mux_idx >= spec->num_mux_defs)
                mux_idx = 0;
-       if (!spec->input_mux[mux_idx].num_items && mux_idx > 0)
-               mux_idx = 0;
        return snd_hda_input_mux_info(&spec->input_mux[mux_idx], uinfo);
 }
 
@@ -430,8 +428,6 @@ static int alc_mux_enum_put(struct snd_kcontrol *kcontrol,
 
        mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
        imux = &spec->input_mux[mux_idx];
-       if (!imux->num_items && mux_idx > 0)
-               imux = &spec->input_mux[0];
 
        type = get_wcaps_type(get_wcaps(codec, nid));
        if (type == AC_WID_AUD_MIX) {
@@ -3971,7 +3967,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
        SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
        SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU),
-       SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734),
+       SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL),
        SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
        SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
        SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
@@ -6252,7 +6248,6 @@ static const char *alc260_models[ALC260_MODEL_LAST] = {
 
 static struct snd_pci_quirk alc260_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_ACER),
-       SND_PCI_QUIRK(0x1025, 0x007f, "Acer", ALC260_WILL),
        SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER),
        SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100),
        SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013),
@@ -6282,7 +6277,7 @@ static struct alc_config_preset alc260_presets[] = {
                .num_dacs = ARRAY_SIZE(alc260_dac_nids),
                .dac_nids = alc260_dac_nids,
                .num_adc_nids = ARRAY_SIZE(alc260_dual_adc_nids),
-               .adc_nids = alc260_dual_adc_nids,
+               .adc_nids = alc260_adc_nids,
                .num_channel_mode = ARRAY_SIZE(alc260_modes),
                .channel_mode = alc260_modes,
                .input_mux = &alc260_capture_source,
@@ -6455,7 +6450,6 @@ static int patch_alc260(struct hda_codec *codec)
 
        spec->stream_analog_playback = &alc260_pcm_analog_playback;
        spec->stream_analog_capture = &alc260_pcm_analog_capture;
-       spec->stream_analog_alt_capture = &alc260_pcm_analog_capture;
 
        spec->stream_digital_playback = &alc260_pcm_digital_playback;
        spec->stream_digital_capture = &alc260_pcm_digital_capture;
@@ -6589,7 +6583,7 @@ static struct hda_input_mux alc883_lenovo_nb0763_capture_source = {
        .num_items = 4,
        .items = {
                { "Mic", 0x0 },
-               { "Int Mic", 0x1 },
+               { "iMic", 0x1 },
                { "Line", 0x2 },
                { "CD", 0x4 },
        },
@@ -8038,8 +8032,8 @@ static struct snd_kcontrol_new alc883_lenovo_nb0763_mixer[] = {
        HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
        HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
        HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-       HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+       HDA_CODEC_VOLUME("iMic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+       HDA_CODEC_MUTE("iMic Playback Switch", 0x0b, 0x1, HDA_INPUT),
        { } /* end */
 };
 
@@ -8896,7 +8890,6 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
 
        SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
-       SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG),
        SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
        SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
        SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
@@ -8924,7 +8917,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
        SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL),
        SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL),
        SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL),
-       SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC882_6ST_DIG),
+       SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
 
        {}
 };
@@ -8938,12 +8931,10 @@ static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24),
        SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24),
        SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3),
-       SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889A_MB31),
        SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31),
        SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
        SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
        SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC885_MB5),
-       SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC885_MB5),
        /* FIXME: HP jack sense seems not working for MBP 5,1 or 5,2,
         * so apparently no perfect solution yet
         */
@@ -9752,8 +9743,6 @@ static void alc882_auto_init_input_src(struct hda_codec *codec)
                        continue;
                mux_idx = c >= spec->num_mux_defs ? 0 : c;
                imux = &spec->input_mux[mux_idx];
-               if (!imux->num_items && mux_idx > 0)
-                       imux = &spec->input_mux[0];
                for (idx = 0; idx < conns; idx++) {
                        /* if the current connection is the selected one,
                         * unmute it as default - otherwise mute it
@@ -10624,13 +10613,6 @@ static struct hda_verb alc262_lenovo_3000_unsol_verbs[] = {
        {}
 };
 
-static struct hda_verb alc262_lenovo_3000_init_verbs[] = {
-       /* Front Mic pin: input vref at 50% */
-       {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50},
-       {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {}
-};
-
 static struct hda_input_mux alc262_fujitsu_capture_source = {
        .num_items = 3,
        .items = {
@@ -11698,8 +11680,7 @@ static struct alc_config_preset alc262_presets[] = {
        [ALC262_LENOVO_3000] = {
                .mixers = { alc262_lenovo_3000_mixer },
                .init_verbs = { alc262_init_verbs, alc262_EAPD_verbs,
-                               alc262_lenovo_3000_unsol_verbs,
-                               alc262_lenovo_3000_init_verbs },
+                               alc262_lenovo_3000_unsol_verbs },
                .num_dacs = ARRAY_SIZE(alc262_dac_nids),
                .dac_nids = alc262_dac_nids,
                .hp_nid = 0x03,
@@ -12389,9 +12370,6 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
                dac = 0x02;
                break;
        case 0x15:
-       case 0x1a: /* ALC259/269 only */
-       case 0x1b: /* ALC259/269 only */
-       case 0x21: /* ALC269vb has this pin, too */
                dac = 0x03;
                break;
        default:
@@ -17260,8 +17238,6 @@ static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
                return 0x02;
        else if (nid >= 0x0c && nid <= 0x0e)
                return nid - 0x0c + 0x02;
-       else if (nid == 0x26) /* ALC887-VD has this DAC too */
-               return 0x25;
        else
                return 0;
 }
@@ -17270,7 +17246,7 @@ static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
 static hda_nid_t alc662_dac_to_mix(struct hda_codec *codec, hda_nid_t pin,
                                   hda_nid_t dac)
 {
-       hda_nid_t mix[5];
+       hda_nid_t mix[4];
        int i, num;
 
        num = snd_hda_get_connections(codec, pin, mix, ARRAY_SIZE(mix));
index a280a68293d377d86e913cdec87b6d134d07e463..86de305fc9f225ef382f6a7bdfbd438a2f97840f 100644 (file)
@@ -1592,18 +1592,12 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
                                "Dell Studio 1555", STAC_DELL_M6_DMIC),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
                                "Dell Studio 1557", STAC_DELL_M6_DMIC),
-       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
-                               "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
-       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
-                               "Dell Studio 1558", STAC_DELL_M6_BOTH),
        {} /* terminator */
 };
 
 static struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1,
                      "Alienware M17x", STAC_ALIENWARE_M17X),
-       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
-                     "Alienware M17x", STAC_ALIENWARE_M17X),
        {} /* terminator */
 };
 
@@ -1718,8 +1712,6 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
                      "HP HDX", STAC_HP_HDX),  /* HDX16 */
        SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620,
                      "HP dv6", STAC_HP_DV5),
-       SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
-                     "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
        SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
                      "HP", STAC_HP_DV5),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
@@ -2061,12 +2053,12 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
        SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000,
                           "Intel D965", STAC_D965_3ST),
        /* Dell 3 stack systems */
+       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f7, "Dell XPS M1730", STAC_DELL_3ST),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01ed, "Dell     ", STAC_DELL_3ST),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f4, "Dell     ", STAC_DELL_3ST),
        /* Dell 3 stack systems with verb table in BIOS */
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
-       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x0227, "Dell Vostro 1400  ", STAC_DELL_BIOS),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x022e, "Dell     ", STAC_DELL_BIOS),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS),
index 726fd4b92e19e8d564adef229c9c5201a0785963..3e1c20ae2f1c2c42c115c293bc26fdd6c660124f 100644 (file)
@@ -347,7 +347,7 @@ static int maya_gpio_sw_put(struct snd_kcontrol *kcontrol,
 
 /* known working input slots (0-4) */
 #define MAYA_LINE_IN   1       /* in-2 */
-#define MAYA_MIC_IN    3       /* in-4 */
+#define MAYA_MIC_IN    4       /* in-5 */
 
 static void wm8776_select_input(struct snd_maya44 *chip, int idx, int line)
 {
@@ -393,8 +393,8 @@ static int maya_rec_src_put(struct snd_kcontrol *kcontrol,
        int changed;
 
        mutex_lock(&chip->mutex);
-       changed = maya_set_gpio_bits(chip->ice, 1 << GPIO_MIC_RELAY,
-                                    sel ? (1 << GPIO_MIC_RELAY) : 0);
+       changed = maya_set_gpio_bits(chip->ice, GPIO_MIC_RELAY,
+                                    sel ? GPIO_MIC_RELAY : 0);
        wm8776_select_input(chip, 0, sel ? MAYA_MIC_IN : MAYA_LINE_IN);
        mutex_unlock(&chip->mutex);
        return changed;
index 1a76e63a1310d1983da66fe3dc3461adcf3385d5..aac20fb4aad23e0592cc658e45ea3cc6ced4c607 100644 (file)
@@ -1774,12 +1774,6 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
                .name = "HP/Compaq nx7010",
                .type = AC97_TUNE_MUTE_LED
         },
-       {
-               .subvendor = 0x1014,
-               .subdevice = 0x0534,
-               .name = "ThinkPad X31",
-               .type = AC97_TUNE_INV_EAPD
-       },
        {
                .subvendor = 0x1014,
                .subdevice = 0x1f00,
@@ -1864,12 +1858,6 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
                .name = "Dell Inspiron 8600",   /* STAC9750/51 */
                .type = AC97_TUNE_HP_ONLY
        },
-       {
-               .subvendor = 0x1028,
-               .subdevice = 0x0182,
-               .name = "Dell Latitude D610",   /* STAC9750/51 */
-               .type = AC97_TUNE_HP_ONLY
-       },
        {
                .subvendor = 0x1028,
                .subdevice = 0x0186,
index c2311f85a331605d2c525c564704f07d5829b0fa..75283fbb4b3fd1723d62a73baf663c14f37f41d3 100644 (file)
@@ -849,7 +849,6 @@ struct snd_m3 {
        struct snd_kcontrol *master_switch;
        struct snd_kcontrol *master_volume;
        struct tasklet_struct hwvol_tq;
-       unsigned int in_suspend;
 
 #ifdef CONFIG_PM
        u16 *suspend_mem;
@@ -885,7 +884,6 @@ static struct pci_device_id snd_m3_ids[] = {
 MODULE_DEVICE_TABLE(pci, snd_m3_ids);
 
 static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
-       SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
        SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
        SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
        SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
@@ -1615,11 +1613,6 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
        outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
        outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);
 
-       /* Ignore spurious HV interrupts during suspend / resume, this avoids
-          mistaking them for a mute button press. */
-       if (chip->in_suspend)
-               return;
-
        if (!chip->master_switch || !chip->master_volume)
                return;
 
@@ -2431,7 +2424,6 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state)
        if (chip->suspend_mem == NULL)
                return 0;
 
-       chip->in_suspend = 1;
        snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
        snd_pcm_suspend_all(chip->pcm);
        snd_ac97_suspend(chip->ac97);
@@ -2505,7 +2497,6 @@ static int m3_resume(struct pci_dev *pci)
        snd_m3_hv_init(chip);
 
        snd_power_change_state(card, SNDRV_CTL_POWER_D0);
-       chip->in_suspend = 0;
        return 0;
 }
 #endif /* CONFIG_PM */
index 32f98535c0b436f9028d16147e1f5efab4b71aab..a83d1968a8450f49816add1bf2bbc2ffbba962c6 100644 (file)
@@ -1161,15 +1161,13 @@ static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private
                                unsigned long count, unsigned long pos)
 {
        struct mixart_mgr *mgr = entry->private_data;
-       unsigned long maxsize;
 
-       if (pos >= MIXART_BA0_SIZE)
-               return 0;
-       maxsize = MIXART_BA0_SIZE - pos;
-       if (count > maxsize)
-               count = maxsize;
        count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
-       if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count))
+       if(count <= 0)
+               return 0;
+       if(pos + count > MIXART_BA0_SIZE)
+               count = (long)(MIXART_BA0_SIZE - pos);
+       if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count))
                return -EFAULT;
        return count;
 }
@@ -1182,15 +1180,13 @@ static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private
                                unsigned long count, unsigned long pos)
 {
        struct mixart_mgr *mgr = entry->private_data;
-       unsigned long maxsize;
 
-       if (pos > MIXART_BA1_SIZE)
-               return 0;
-       maxsize = MIXART_BA1_SIZE - pos;
-       if (count > maxsize)
-               count = maxsize;
        count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
-       if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count))
+       if(count <= 0)
+               return 0;
+       if(pos + count > MIXART_BA1_SIZE)
+               count = (long)(MIXART_BA1_SIZE - pos);
+       if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count))
                return -EFAULT;
        return count;
 }
index 6811433a5f96ebcee3861d6effcc1d05ef695b78..72db4c39007fbc8bd8739819a580c0b2b2da48a8 100644 (file)
@@ -393,10 +393,6 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
                chip->model.suspend = claro_suspend;
                chip->model.resume = claro_resume;
                chip->model.set_adc_params = set_ak5385_params;
-               chip->model.device_config = PLAYBACK_0_TO_I2S |
-                                           PLAYBACK_1_TO_SPDIF |
-                                           CAPTURE_0_FROM_I2S_2 |
-                                           CAPTURE_1_FROM_SPDIF;
                break;
        }
        if (id->driver_data == MODEL_MERIDIAN ||
index 246b7c66eb0df2db2c3011a635c0cb950e2cd5f1..b5ca02e2038c9bb42c35c8a194c57974e15b6686 100644 (file)
@@ -1224,14 +1224,15 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
                    firmware.firmware.ASIC, firmware.firmware.CODEC,
                    firmware.firmware.AUXDSP, firmware.firmware.PROG);
 
-       if (!chip)
-               return 1;
-
        for (i = 0; i < FIRMWARE_VERSIONS; i++) {
                if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
-                       return 1; /* OK */
-
+                       break;
        }
+       if (i >= FIRMWARE_VERSIONS)
+               return 0; /* no match */
+
+       if (!chip)
+               return 1; /* OK */
 
        snd_printdd("Writing Firmware\n");
        if (!chip->fw_entry) {
index 401518c8c86dcd17c27ac660954524e008ed37fd..7bb827c7d8061bfea72e0c2579015f8668bbebc9 100644 (file)
@@ -4610,7 +4610,6 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
                if (err < 0)
                        return err;
 
-               memset(&info, 0, sizeof(info));
                spin_lock_irqsave(&hdsp->lock, flags);
                info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
                info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
index ec2125caa640151c9652e0aea0c4a32425b26e4a..0dce331a2a3b105e565e7deaa315fdb39c575d4d 100644 (file)
@@ -4127,7 +4127,6 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
 
        case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
 
-               memset(&info, 0, sizeof(info));
                spin_lock_irq(&hdspm->lock);
                info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
                info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
index 03d6aea19749031f53404682748ef6186ae0edf0..8a332d2f615cfc01e4612a9eb459628e8596c59b 100644 (file)
@@ -1790,12 +1790,6 @@ static struct ac97_quirk ac97_quirks[] = {
                .name = "ASRock K7VT2",
                .type = AC97_TUNE_HP_ONLY
        },
-       {
-               .subvendor = 0x110a,
-               .subdevice = 0x0079,
-               .name = "Fujitsu Siemens D1289",
-               .type = AC97_TUNE_HP_ONLY
-       },
        {
                .subvendor = 0x1019,
                .subdevice = 0x0a81,
index 10c7550393ba93be17886d637aec0ad7aaacc362..4d47bc4f74284764ec24434167371d3245009df8 100644 (file)
@@ -90,10 +90,12 @@ static int ak4104_spi_write(struct snd_soc_codec *codec, unsigned int reg,
        if (reg >= codec->reg_cache_size)
                return -EINVAL;
 
+       reg &= AK4104_REG_MASK;
+       reg |= AK4104_WRITE;
+
        /* only write to the hardware if value has changed */
        if (cache[reg] != value) {
-               u8 tmp[2] = { (reg & AK4104_REG_MASK) | AK4104_WRITE, value };
-
+               u8 tmp[2] = { reg, value };
                if (spi_write(spi, tmp, sizeof(tmp))) {
                        dev_err(&spi->dev, "SPI write failed\n");
                        return -EIO;
index 9f9bcd839cba78805318bf89fa499bf9a1abd982..2089fe7d1ba64523f2962410604fbaae38958575 100644 (file)
@@ -423,8 +423,8 @@ static const struct soc_enum wm8350_enum[] = {
        SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr),
 };
 
-static DECLARE_TLV_DB_SCALE(pre_amp_tlv, -1200, 3525, 0);
-static DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 600, 0);
+static DECLARE_TLV_DB_LINEAR(pre_amp_tlv, -1200, 3525);
+static DECLARE_TLV_DB_LINEAR(out_pga_tlv, -5700, 600);
 static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1);
 static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
 static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
index 775195bf59f9b0fb9b13e3dfa57e4e8be6e06ddc..b9ef4d9152211a7649249e0f6a96c50f484147b0 100644 (file)
@@ -106,21 +106,21 @@ static void wm8400_codec_reset(struct snd_soc_codec *codec)
        wm8400_reset_codec_reg_cache(wm8400->wm8400);
 }
 
-static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
+static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
 
-static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
+static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
 
-static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0);
+static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, -2100, 0);
 
-static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
+static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
 
-static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
+static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
 
-static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
+static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
 
-static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
+static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
 
-static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
+static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
 
 static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
         struct snd_ctl_elem_value *ucontrol)
@@ -439,7 +439,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
 /* INMIX dB values */
 static const unsigned int in_mix_tlv[] = {
        TLV_DB_RANGE_HEAD(1),
-       0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
+       0,7, TLV_DB_LINEAR_ITEM(-1200, 600),
 };
 
 /* Left In PGA Connections */
index c1e767d2e2204ca393cd60223c2013a1123d6d84..6bded8c78150d7f64c52cf7cc4eca8fd89df0866 100644 (file)
@@ -268,9 +268,9 @@ SOC_DOUBLE("DAC2 Invert Switch", WM8580_DAC_CONTROL4,  2, 3, 1, 0),
 SOC_DOUBLE("DAC3 Invert Switch", WM8580_DAC_CONTROL4,  4, 5, 1, 0),
 
 SOC_SINGLE("DAC ZC Switch", WM8580_DAC_CONTROL5, 5, 1, 0),
-SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 1),
-SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 1),
-SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 1),
+SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 0),
+SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 0),
+SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 0),
 
 SOC_DOUBLE("ADC Mute Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 0),
 SOC_SINGLE("ADC High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
index 38a53a689acaa757600e4ffea134a41596c16d75..a9829aa26e53591a0574d44f884c5fccee5f0b29 100644 (file)
@@ -93,6 +93,7 @@ SOC_DAPM_SINGLE("Bypass Switch", WM8776_OUTMUX, 2, 1, 0),
 
 static const struct snd_soc_dapm_widget wm8776_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("AUX"),
+SND_SOC_DAPM_INPUT("AUX"),
 
 SND_SOC_DAPM_INPUT("AIN1"),
 SND_SOC_DAPM_INPUT("AIN2"),
@@ -177,6 +178,13 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_LEFT_J:
                iface |= 0x0001;
                break;
+               /* FIXME: CHECK A/B */
+       case SND_SOC_DAIFMT_DSP_A:
+               iface |= 0x0003;
+               break;
+       case SND_SOC_DAIFMT_DSP_B:
+               iface |= 0x0007;
+               break;
        default:
                return -EINVAL;
        }
index 253159cedea627c61769e846ab91f4d3ddf641fe..2d702db4131db36f17d854621c99767777c64c17 100644 (file)
@@ -110,21 +110,21 @@ static const u16 wm8990_reg[] = {
 
 #define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0)
 
-static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
+static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
 
-static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
+static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
 
-static const DECLARE_TLV_DB_SCALE(out_mix_tlv, 0, -2100, 0);
+static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
 
-static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
+static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
 
-static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
+static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
 
-static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
+static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
 
-static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
+static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
 
-static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
+static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
 
 static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
@@ -450,7 +450,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
 /* INMIX dB values */
 static const unsigned int in_mix_tlv[] = {
        TLV_DB_RANGE_HEAD(1),
-       0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
+       0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
 };
 
 /* Left In PGA Connections */
index 79633eae6439a5948b0a663ce61ea86e1f4624a6..f26a1254b0bb595a1d3e260d694655e7ba662573 100644 (file)
@@ -3325,32 +3325,6 @@ static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
        return err;
 }
 
-/*
- * This call will put the synth in "USB send" mode, i.e it will send MIDI
- * messages through USB (this is disabled at startup). The synth will
- * acknowledge by sending a sysex on endpoint 0x85 and by displaying a USB
- * sign on its LCD. Values here are chosen based on sniffing USB traffic
- * under Windows.
- */
-static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
-{
-       int err, actual_length;
-
-       /* "midi send" enable */
-       static const u8 seq[] = { 0x4e, 0x73, 0x52, 0x01 };
-
-       void *buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-       err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x05), buf,
-                       ARRAY_SIZE(seq), &actual_length, 1000);
-       kfree(buf);
-       if (err < 0)
-               return err;
-
-       return 0;
-}
-
 /*
  * Setup quirks
  */
@@ -3642,12 +3616,6 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
                        goto __err_val;
        }
 
-       /* Access Music VirusTI Desktop */
-       if (id == USB_ID(0x133e, 0x0815)) {
-               if (snd_usb_accessmusic_boot_quirk(dev) < 0)
-                       goto __err_val;
-       }
-
        /*
         * found a config.  now register to ALSA
         */
index 64d8d2e6026df684a1a0c298f5aabbeb361c57f1..0eff19ceb7e1f9d74653d80c402d3930a6be8bf8 100644 (file)
@@ -931,8 +931,6 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
        DEFINE_WAIT(wait);
        long timeout = msecs_to_jiffies(50);
 
-       if (ep->umidi->disconnected)
-               return;
        /*
         * The substream buffer is empty, but some data might still be in the
         * currently active URBs, so we have to wait for those to complete.
@@ -1077,21 +1075,14 @@ static unsigned int snd_usbmidi_count_bits(unsigned int x)
  * Frees an output endpoint.
  * May be called when ep hasn't been initialized completely.
  */
-static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep)
+static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep)
 {
        unsigned int i;
 
        for (i = 0; i < OUTPUT_URBS; ++i)
-               if (ep->urbs[i].urb) {
+               if (ep->urbs[i].urb)
                        free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
                                            ep->max_transfer);
-                       ep->urbs[i].urb = NULL;
-               }
-}
-
-static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep)
-{
-       snd_usbmidi_out_endpoint_clear(ep);
        kfree(ep);
 }
 
@@ -1210,18 +1201,15 @@ void snd_usbmidi_disconnect(struct list_head* p)
                                usb_kill_urb(ep->out->urbs[j].urb);
                        if (umidi->usb_protocol_ops->finish_out_endpoint)
                                umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
-                       ep->out->active_urbs = 0;
-                       if (ep->out->drain_urbs) {
-                               ep->out->drain_urbs = 0;
-                               wake_up(&ep->out->drain_wait);
-                       }
                }
                if (ep->in)
                        for (j = 0; j < INPUT_URBS; ++j)
                                usb_kill_urb(ep->in->urbs[j]);
                /* free endpoints here; later call can result in Oops */
-               if (ep->out)
-                       snd_usbmidi_out_endpoint_clear(ep->out);
+               if (ep->out) {
+                       snd_usbmidi_out_endpoint_delete(ep->out);
+                       ep->out = NULL;
+               }
                if (ep->in) {
                        snd_usbmidi_in_endpoint_delete(ep->in);
                        ep->in = NULL;
@@ -1372,12 +1360,6 @@ static struct port_info {
        EXTERNAL_PORT(0x086a, 0x0001, 8, "%s Broadcast"),
        EXTERNAL_PORT(0x086a, 0x0002, 8, "%s Broadcast"),
        EXTERNAL_PORT(0x086a, 0x0003, 4, "%s Broadcast"),
-       /* Access Music Virus TI */
-       EXTERNAL_PORT(0x133e, 0x0815, 0, "%s MIDI"),
-       PORT_INFO(0x133e, 0x0815, 1, "%s Synth", 0,
-               SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC |
-               SNDRV_SEQ_PORT_TYPE_HARDWARE |
-               SNDRV_SEQ_PORT_TYPE_SYNTHESIZER),
 };
 
 static struct port_info *find_port_info(struct snd_usb_midi* umidi, int number)
index 391e02f5b1996ac0e829b77c6fc306faaf543add..f6f201eb24ceeb00145b406d6fbf18bf09d08817 100644 (file)
@@ -2050,33 +2050,6 @@ YAMAHA_DEVICE(0x7010, "UB99"),
        }
 },
 
-/* Access Music devices */
-{
-       /* VirusTI Desktop */
-       USB_DEVICE_VENDOR_SPEC(0x133e, 0x0815),
-       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
-               .ifnum = QUIRK_ANY_INTERFACE,
-               .type = QUIRK_COMPOSITE,
-               .data = &(const struct snd_usb_audio_quirk[]) {
-                       {
-                               .ifnum = 3,
-                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
-                               .data = &(const struct snd_usb_midi_endpoint_info) {
-                                       .out_cables = 0x0003,
-                                       .in_cables  = 0x0003
-                               }
-                       },
-                       {
-                               .ifnum = 4,
-                               .type = QUIRK_IGNORE_INTERFACE
-                       },
-                       {
-                               .ifnum = -1
-                       }
-               }
-       }
-},
-
 /* */
 {
        /* aka. Serato Scratch Live DJ Box */
index bd498d496952a6f78973b0e6b8422552009770d0..bdd3b7ecad0a6dedbf6fc673f7daf3e40c9c49ce 100644 (file)
@@ -24,10 +24,7 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT))
 DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
 DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
 
-# Make the path relative to DESTDIR, not prefix
-ifndef DESTDIR
 prefix?=$(HOME)
-endif
 bindir?=$(prefix)/bin
 htmldir?=$(prefix)/share/doc/perf-doc
 pdfdir?=$(prefix)/share/doc/perf-doc
@@ -35,6 +32,7 @@ mandir?=$(prefix)/share/man
 man1dir=$(mandir)/man1
 man5dir=$(mandir)/man5
 man7dir=$(mandir)/man7
+# DESTDIR=
 
 ASCIIDOC=asciidoc
 ASCIIDOC_EXTRA = --unsafe
index 719d0283c9f383f565ad087aa5b7fb919ec57338..7e190d522cd5848123b0d98220e6555fd3e70c69 100644 (file)
@@ -218,10 +218,7 @@ STRIP ?= strip
 # runtime figures out where they are based on the path to the executable.
 # This can help installing the suite in a relocatable way.
 
-# Make the path relative to DESTDIR, not to prefix
-ifndef DESTDIR
 prefix = $(HOME)
-endif
 bindir_relative = bin
 bindir = $(prefix)/$(bindir_relative)
 mandir = share/man
@@ -238,6 +235,7 @@ sysconfdir = $(prefix)/etc
 ETC_PERFCONFIG = etc/perfconfig
 endif
 lib = lib
+# DESTDIR=
 
 export prefix bindir sharedir sysconfdir
 
index 3c6d14132ff091a19fc3706402972eae6cc62075..43cf3ea9e088fd86953637450036ecb56f3964fd 100644 (file)
@@ -48,10 +48,6 @@ static inline void callchain_init(struct callchain_node *node)
        INIT_LIST_HEAD(&node->brothers);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->val);
-
-       node->children_hit = 0;
-       node->parent = NULL;
-       node->hit = 0;
 }
 
 static inline u64 cumul_hits(struct callchain_node *node)
index 4f3434fbb086a2e20ea662ba28530d20040acadc..7495ce3473448cd45ee1dba9e0ae441c83245ca4 100644 (file)
@@ -1226,7 +1226,7 @@ skip_lpage:
 
        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-               unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
+               unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
 
                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
@@ -1309,7 +1309,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
        struct kvm_memory_slot *memslot;
        int r, i;
-       unsigned long n;
+       int n;
        unsigned long any = 0;
 
        r = -EINVAL;
@@ -1321,7 +1321,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
        if (!memslot->dirty_bitmap)
                goto out;
 
-       n = kvm_dirty_bitmap_bytes(memslot);
+       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
 
        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];
@@ -1663,13 +1663,10 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
        memslot = gfn_to_memslot_unaliased(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
-               unsigned long *p = memslot->dirty_bitmap +
-                                       rel_gfn / BITS_PER_LONG;
-               int offset = rel_gfn % BITS_PER_LONG;
 
                /* avoid RMW */
-               if (!test_bit(offset, p))
-                       set_bit(offset, p);
+               if (!test_bit(rel_gfn, memslot->dirty_bitmap))
+                       set_bit(rel_gfn, memslot->dirty_bitmap);
        }
 }