Merge tag 'staging-3.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Jul 2013 18:40:23 +0000 (11:40 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Jul 2013 18:40:23 +0000 (11:40 -0700)
Pull staging tree update from Greg KH:
 "Here's the large staging tree merge for 3.11-rc1

  Huge thing here is the Lustre client code.  Unfortunately, due to it
  not building properly on a wide variety of different architectures
  (this was production code???), it is currently disabled from the build
  so as to not annoy people.

  Other than Lustre, there are loads of comedi patches, working to clean
  up that subsystem, iio updates and new drivers, and a load of cleanups
  from the OPW applicants in their quest to get a summer internship.

  All of these have been in the linux-next releases for a while (hence
  the Lustre code being disabled)"

Fixed up trivial conflict in drivers/staging/serqt_usb2/serqt_usb2.c due
to independent renamings in the staging driver cleanup and the USB
tree.

* tag 'staging-3.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (868 commits)
  Revert "Revert "Revert "staging/lustre: drop CONFIG_BROKEN dependency"""
  staging: rtl8192u: fix line length in r819xU_phy.h
  staging: rtl8192u: rename variables in r819xU_phy.h
  staging: rtl8192u: fix comments in r819xU_phy.h
  staging: rtl8192u: fix whitespace in r819xU_phy.h
  staging: rtl8192u: fix newlines in r819xU_phy.c
  staging: comedi: unioxx5: use comedi_alloc_spriv()
  staging: comedi: unioxx5: fix unioxx5_detach()
  silicom: checkpatch: errors caused by macros
  Staging: silicom: remove the board_t typedef in bpctl_mod.c
  Staging: silicom: capitalize labels in the bp_media_type enum
  Staging: silicom: remove bp_media_type enum typedef
  staging: rtl8192u: replace msleep(1) with usleep_range() in r819xU_phy.c
  staging: rtl8192u: rename dwRegRead and rtStatus in r819xU_phy.c
  staging: rtl8192u: replace __FUNCTION__ in r819xU_phy.c
  staging: rtl8192u: limit line size in r819xU_phy.c
  zram: allow request end to coincide with disksize
  staging: drm/imx: use generic irq chip unused field to block out invalid irqs
  staging: drm/imx: use generic irqchip
  staging: drm/imx: ipu-dmfc: use defines for ipu channel numbers
  ...

864 files changed:
Documentation/ABI/testing/configfs-usb-gadget [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-acm [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-ecm [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-eem [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-ncm [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-obex [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-phonet [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-rndis [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-serial [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-subset [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-usb
Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
Documentation/DocBook/media/v4l/dev-codec.xml
Documentation/DocBook/media/v4l/v4l2.xml
Documentation/console/console.txt
Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
Documentation/devicetree/bindings/tty/serial/fsl-lpuart.txt [new file with mode: 0644]
Documentation/devicetree/bindings/usb/ci13xxx-imx.txt
Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt
Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt
Documentation/devicetree/bindings/usb/usb3503.txt
Documentation/filesystems/Locking
Documentation/filesystems/f2fs.txt
Documentation/filesystems/porting
Documentation/filesystems/vfs.txt
Documentation/networking/ip-sysctl.txt
Documentation/serial/00-INDEX
Documentation/serial/stallion.txt [deleted file]
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/usb/gadget_configfs.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/alpha/include/asm/pgtable.h
arch/alpha/kernel/console.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/pci-sysfs.c
arch/alpha/kernel/process.c
arch/arc/include/asm/pgtable.h
arch/arm/Kconfig
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/exynos5250-pinctrl.dtsi
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/tegra20-colibri-512.dtsi
arch/arm/boot/dts/tegra20-harmony.dts
arch/arm/boot/dts/tegra20-iris-512.dts
arch/arm/boot/dts/tegra20-paz00.dts
arch/arm/boot/dts/tegra20-seaboard.dts
arch/arm/boot/dts/tegra20-tamonten.dtsi
arch/arm/boot/dts/tegra20-trimslice.dts
arch/arm/boot/dts/tegra20-ventana.dts
arch/arm/boot/dts/tegra20-whistler.dts
arch/arm/boot/dts/tegra20.dtsi
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/glue-proc.h
arch/arm/include/asm/pgtable-nommu.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/smp_plat.h
arch/arm/kernel/devtree.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/process.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/mm/cache-v7.S
arch/arm/mm/flush.c
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/mm/proc-fa526.S
arch/arm/mm/proc-macros.S
arch/arm/mm/proc-v7.S
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/perf_event.c
arch/avr32/include/asm/pgtable.h
arch/blackfin/include/asm/pgtable.h
arch/c6x/include/asm/pgtable.h
arch/cris/include/asm/pgtable.h
arch/frv/include/asm/pgtable.h
arch/h8300/include/asm/pgtable.h
arch/hexagon/include/asm/pgtable.h
arch/ia64/include/asm/irqflags.h
arch/ia64/include/asm/pgtable.h
arch/m32r/include/asm/pgtable.h
arch/m68k/include/asm/pgtable_mm.h
arch/m68k/include/asm/pgtable_no.h
arch/metag/include/asm/hugetlb.h
arch/metag/include/asm/pgtable.h
arch/microblaze/include/asm/pgtable.h
arch/mips/include/asm/pgtable.h
arch/mips/pci/pci-bcm1480.c
arch/mips/pci/pci-sb1250.c
arch/mn10300/include/asm/irqflags.h
arch/mn10300/include/asm/pgtable.h
arch/mn10300/include/asm/smp.h
arch/mn10300/include/asm/uaccess.h
arch/mn10300/kernel/setup.c
arch/mn10300/unit-asb2305/pci-asb2305.c
arch/openrisc/include/asm/pgtable.h
arch/parisc/hpux/fs.c
arch/parisc/include/asm/mmzone.h
arch/parisc/include/asm/pci.h
arch/parisc/include/asm/pgtable.h
arch/parisc/kernel/hardware.c
arch/parisc/kernel/pacache.S
arch/parisc/kernel/pci.c
arch/parisc/kernel/setup.c
arch/parisc/mm/init.c
arch/powerpc/include/asm/mpc52xx_psc.h
arch/powerpc/include/asm/pgtable.h
arch/powerpc/kernel/pci-common.c
arch/powerpc/kvm/booke.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/pseries/eeh_cache.c
arch/powerpc/platforms/pseries/eeh_pe.c
arch/powerpc/sysdev/fsl_pci.c
arch/s390/include/asm/dma-mapping.h
arch/s390/include/asm/pgtable.h
arch/s390/kernel/ipl.c
arch/s390/kernel/irq.c
arch/s390/mm/mem_detect.c
arch/score/include/asm/pgtable.h
arch/sh/include/asm/pgtable.h
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/leon.h
arch/sparc/include/asm/leon_amba.h
arch/sparc/include/asm/linkage.h [deleted file]
arch/sparc/include/asm/pgtable_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/kernel/ds.c
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/leon_pci_grpci1.c
arch/sparc/kernel/leon_pmc.c
arch/sparc/kernel/pci.c
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/setup_64.c
arch/sparc/mm/init_64.c
arch/sparc/mm/tlb.c
arch/sparc/prom/bootstr_32.c
arch/sparc/prom/tree_64.c
arch/tile/include/asm/pgtable.h
arch/tile/lib/exports.c
arch/um/drivers/mconsole_kern.c
arch/um/include/asm/pgtable.h
arch/unicore32/include/asm/pgtable.h
arch/x86/Kconfig
arch/x86/crypto/aesni-intel_asm.S
arch/x86/ia32/ia32_aout.c
arch/x86/include/asm/irq.h
arch/x86/include/asm/microcode.h
arch/x86/include/asm/nmi.h
arch/x86/include/asm/pgtable.h
arch/x86/kernel/apic/hw_nmi.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/x86.c
arch/x86/platform/efi/efi.c
arch/xtensa/include/asm/pgtable.h
crypto/algboss.c
crypto/api.c
crypto/internal.h
drivers/acpi/acpi_lpss.c
drivers/acpi/device_pm.c
drivers/acpi/dock.c
drivers/acpi/internal.h
drivers/acpi/power.c
drivers/acpi/resource.c
drivers/acpi/scan.c
drivers/ata/libata-acpi.c
drivers/ata/libata-core.c
drivers/ata/libata.h
drivers/base/firmware_class.c
drivers/block/cryptoloop.c
drivers/block/loop.c
drivers/block/loop.h [new file with mode: 0644]
drivers/block/rbd.c
drivers/bluetooth/btmrvl_main.c
drivers/char/Kconfig
drivers/clk/clk.c
drivers/clk/samsung/clk-exynos5250.c
drivers/clk/samsung/clk-pll.c
drivers/clk/spear/spear3xx_clock.c
drivers/clk/tegra/clk-tegra30.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/gpio/gpio-omap.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/Kconfig
drivers/input/serio/Kconfig
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/cyttsp_core.c
drivers/input/touchscreen/cyttsp_core.h
drivers/irqchip/irq-gic.c
drivers/media/Kconfig
drivers/media/i2c/s5c73m3/s5c73m3-core.c
drivers/media/pci/cx88/cx88-alsa.c
drivers/media/pci/cx88/cx88-video.c
drivers/media/platform/coda.c
drivers/media/platform/davinci/vpbe_display.c
drivers/media/platform/davinci/vpfe_capture.c
drivers/media/platform/exynos4-is/fimc-is-regs.c
drivers/media/platform/exynos4-is/fimc-is.c
drivers/media/platform/exynos4-is/fimc-is.h
drivers/media/platform/exynos4-is/fimc-isp.c
drivers/media/platform/exynos4-is/mipi-csis.c
drivers/media/platform/s3c-camif/camif-core.h
drivers/media/platform/s5p-jpeg/Makefile
drivers/media/platform/s5p-mfc/Makefile
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/platform/s5p-mfc/s5p_mfc_common.h
drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
drivers/media/platform/sh_veu.c
drivers/media/platform/soc_camera/soc_camera.c
drivers/media/radio/Kconfig
drivers/media/radio/radio-si476x.c
drivers/media/tuners/Kconfig
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/gspca/sonixb.c
drivers/media/usb/pwc/pwc.h
drivers/media/v4l2-core/v4l2-ctrls.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/media/v4l2-core/v4l2-mem2mem.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/mfd/tps6586x.c
drivers/net/bonding/bond_main.c
drivers/net/can/usb/usb_8dev.c
drivers/net/ethernet/atheros/Kconfig
drivers/net/ethernet/atheros/Makefile
drivers/net/ethernet/atheros/alx/Makefile [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/alx.h [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/ethtool.c [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/hw.c [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/hw.h [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/main.c [new file with mode: 0644]
drivers/net/ethernet/atheros/alx/reg.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvtap.c
drivers/net/tun.c
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan.c
drivers/net/wan/dlci.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/iwlegacy/3945-rs.c
drivers/net/wireless/iwlegacy/4965-rs.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/parisc/iosapic.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pci.h
drivers/pci/setup-bus.c
drivers/regulator/tps6586x-regulator.c
drivers/scsi/bfa/bfad_debugfs.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fnic/fnic_debugfs.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/spi/spi-pxa2xx-dma.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-s3c64xx.c
drivers/staging/media/davinci_vpfe/Kconfig
drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
drivers/staging/media/solo6x10/Kconfig
drivers/staging/serqt_usb2/serqt_usb2.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/tty/Makefile
drivers/tty/hvc/hvc_iucv.c
drivers/tty/n_tty.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250_gsc.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/Kconfig
drivers/tty/serial/Makefile
drivers/tty/serial/altera_uart.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/cpm_uart/cpm_uart_core.c
drivers/tty/serial/fsl_lpuart.c [new file with mode: 0644]
drivers/tty/serial/imx.c
drivers/tty/serial/mfd.c
drivers/tty/serial/mpc52xx_uart.c
drivers/tty/serial/of_serial.c
drivers/tty/serial/omap-serial.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/samsung.c
drivers/tty/serial/sc26xx.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sirfsoc_uart.c
drivers/tty/serial/sunhv.c
drivers/tty/serial/sunsab.c
drivers/tty/serial/sunsu.c
drivers/tty/serial/sunzilog.c
drivers/tty/serial/ucc_uart.c
drivers/tty/serial/vt8500_serial.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/sysrq.c
drivers/tty/tty_io.c
drivers/tty/tty_ldsem.c [new file with mode: 0644]
drivers/tty/vt/vt.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/Kconfig
drivers/usb/Makefile
drivers/usb/chipidea/Kconfig
drivers/usb/chipidea/Makefile
drivers/usb/chipidea/bits.h
drivers/usb/chipidea/ci.h
drivers/usb/chipidea/ci13xxx_imx.c [deleted file]
drivers/usb/chipidea/ci13xxx_imx.h [deleted file]
drivers/usb/chipidea/ci13xxx_msm.c [deleted file]
drivers/usb/chipidea/ci13xxx_pci.c [deleted file]
drivers/usb/chipidea/ci_hdrc_imx.c [new file with mode: 0644]
drivers/usb/chipidea/ci_hdrc_imx.h [new file with mode: 0644]
drivers/usb/chipidea/ci_hdrc_msm.c [new file with mode: 0644]
drivers/usb/chipidea/ci_hdrc_pci.c [new file with mode: 0644]
drivers/usb/chipidea/core.c
drivers/usb/chipidea/debug.c
drivers/usb/chipidea/debug.h
drivers/usb/chipidea/host.c
drivers/usb/chipidea/host.h
drivers/usb/chipidea/udc.c
drivers/usb/chipidea/udc.h
drivers/usb/chipidea/usbmisc_imx.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/usbtmc.c
drivers/usb/core/devio.c
drivers/usb/core/file.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/hub.h
drivers/usb/core/message.c
drivers/usb/core/port.c
drivers/usb/core/sysfs.c
drivers/usb/core/usb.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/Makefile
drivers/usb/gadget/cdc2.c
drivers/usb/gadget/ether.c
drivers/usb/gadget/f_ecm.c
drivers/usb/gadget/f_eem.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/f_ncm.c
drivers/usb/gadget/f_obex.c
drivers/usb/gadget/f_phonet.c
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/f_subset.c
drivers/usb/gadget/f_uac2.c
drivers/usb/gadget/f_uvc.c
drivers/usb/gadget/fotg210-udc.c [new file with mode: 0644]
drivers/usb/gadget/fotg210.h [new file with mode: 0644]
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/gadget/fusb300_udc.c
drivers/usb/gadget/g_ffs.c
drivers/usb/gadget/m66592-udc.c
drivers/usb/gadget/multi.c
drivers/usb/gadget/mv_u3d_core.c
drivers/usb/gadget/ncm.c
drivers/usb/gadget/nokia.c
drivers/usb/gadget/pxa27x_udc.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/rndis.h
drivers/usb/gadget/u_ecm.h [new file with mode: 0644]
drivers/usb/gadget/u_eem.h [new file with mode: 0644]
drivers/usb/gadget/u_ether.c
drivers/usb/gadget/u_ether.h
drivers/usb/gadget/u_ether_configfs.h [new file with mode: 0644]
drivers/usb/gadget/u_gether.h [new file with mode: 0644]
drivers/usb/gadget/u_ncm.h [new file with mode: 0644]
drivers/usb/gadget/u_phonet.h
drivers/usb/gadget/u_rndis.h [new file with mode: 0644]
drivers/usb/gadget/uvc_queue.c
drivers/usb/host/Kconfig
drivers/usb/host/Makefile
drivers/usb/host/ehci-atmel.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-grlib.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-mv.c
drivers/usb/host/ehci-mxc.c
drivers/usb/host/ehci-octeon.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-orion.c
drivers/usb/host/ehci-platform.c
drivers/usb/host/ehci-pmcmsp.c
drivers/usb/host/ehci-ppc-of.c
drivers/usb/host/ehci-s5p.c
drivers/usb/host/ehci-sead3.c
drivers/usb/host/ehci-sh.c
drivers/usb/host/ehci-spear.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/ehci-tilegx.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/host/ehci.h
drivers/usb/host/fhci-sched.c
drivers/usb/host/fhci.h
drivers/usb/host/fusbh200-hcd.c [new file with mode: 0644]
drivers/usb/host/fusbh200.h [new file with mode: 0644]
drivers/usb/host/hwa-hc.c
drivers/usb/host/imx21-hcd.c
drivers/usb/host/isp1760-if.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/ohci-da8xx.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-hub.c
drivers/usb/host/ohci-jz4740.c
drivers/usb/host/ohci-nxp.c
drivers/usb/host/ohci-octeon.c
drivers/usb/host/ohci-omap.c
drivers/usb/host/ohci-omap3.c
drivers/usb/host/ohci-pci.c
drivers/usb/host/ohci-platform.c
drivers/usb/host/ohci-ppc-of.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/ohci-q.c
drivers/usb/host/ohci-sm501.c
drivers/usb/host/ohci-spear.c
drivers/usb/host/ohci-tilegx.c
drivers/usb/host/ohci-tmio.c
drivers/usb/host/ohci.h
drivers/usb/host/oxu210hp-hcd.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/pci-quirks.h
drivers/usb/host/uhci-grlib.c
drivers/usb/host/uhci-platform.c
drivers/usb/host/whci/hcd.c
drivers/usb/host/xhci-dbg.c
drivers/usb/host/xhci-ext-caps.h
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/adutux.c
drivers/usb/misc/sisusbvga/sisusb_con.c
drivers/usb/misc/usb3503.c
drivers/usb/musb/Kconfig
drivers/usb/musb/Makefile
drivers/usb/musb/blackfin.c
drivers/usb/musb/da8xx.c
drivers/usb/musb/davinci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_gadget.h
drivers/usb/musb/musb_host.c
drivers/usb/musb/musb_host.h
drivers/usb/musb/musb_virthub.c
drivers/usb/musb/omap2430.c
drivers/usb/musb/tusb6010.c
drivers/usb/musb/ux500.c
drivers/usb/musb/ux500_dma.c
drivers/usb/phy/Kconfig
drivers/usb/phy/Makefile
drivers/usb/phy/of.c [new file with mode: 0644]
drivers/usb/phy/phy-ab8500-usb.c
drivers/usb/phy/phy-nop.c
drivers/usb/phy/phy-omap-usb3.c
drivers/usb/phy/phy-rcar-usb.c
drivers/usb/phy/phy-samsung-usb.c
drivers/usb/phy/phy-samsung-usb.h
drivers/usb/phy/phy-samsung-usb2.c
drivers/usb/phy/phy-samsung-usb3.c
drivers/usb/phy/phy-tegra-usb.c
drivers/usb/phy/phy-ulpi-viewport.c
drivers/usb/serial/Kconfig
drivers/usb/serial/Makefile
drivers/usb/serial/ark3116.c
drivers/usb/serial/bus.c
drivers/usb/serial/console.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/f81232.c
drivers/usb/serial/flashloader.c [new file with mode: 0644]
drivers/usb/serial/garmin_gps.c
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/metro-usb.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/opticon.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/quatech2.c
drivers/usb/serial/sierra.c
drivers/usb/serial/ssu100.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/ti_usb_3410_5052.h
drivers/usb/serial/usb-serial.c
drivers/usb/serial/usb_wwan.c
drivers/usb/serial/whiteheat.c
drivers/usb/storage/alauda.c
drivers/usb/storage/sddr09.c
drivers/usb/usb-common.c
drivers/usb/wusbcore/devconnect.c
drivers/usb/wusbcore/mmc.c
drivers/usb/wusbcore/pal.c
drivers/usb/wusbcore/reservation.c
drivers/usb/wusbcore/rh.c
drivers/usb/wusbcore/wa-nep.c
drivers/usb/wusbcore/wa-rpipe.c
drivers/usb/wusbcore/wa-xfer.c
drivers/usb/wusbcore/wusbhc.c
drivers/usb/wusbcore/wusbhc.h
drivers/uwb/drp.c
drivers/uwb/est.c
drivers/uwb/hwa-rc.c
drivers/uwb/pal.c
drivers/uwb/rsv.c
drivers/uwb/uwb-internal.h
drivers/uwb/whci.c
drivers/vfio/pci/vfio_pci.c
drivers/video/au1100fb.c
drivers/video/au1200fb.c
drivers/video/console/fbcon.c
drivers/video/console/mdacon.c
drivers/video/console/newport_con.c
drivers/video/console/sticon.c
drivers/video/pxa3xx-gcu.c
fs/9p/vfs_addr.c
fs/9p/vfs_dir.c
fs/adfs/dir.c
fs/affs/dir.c
fs/afs/dir.c
fs/afs/file.c
fs/autofs4/root.c
fs/bad_inode.c
fs/befs/linuxvfs.c
fs/bfs/dir.c
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/buffer.c
fs/cachefiles/interface.c
fs/cachefiles/namei.c
fs/cachefiles/xattr.c
fs/ceph/addr.c
fs/ceph/dir.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/file.c
fs/cifs/readdir.c
fs/coda/dir.c
fs/compat.c
fs/compat_ioctl.c
fs/configfs/dir.c
fs/cramfs/inode.c
fs/dcache.c
fs/dlm/config.c
fs/dlm/lock.c
fs/dlm/lockspace.c
fs/dlm/lowcomms.c
fs/ecryptfs/file.c
fs/efs/dir.c
fs/exec.c
fs/exofs/dir.c
fs/exofs/inode.c
fs/exportfs/expfs.c
fs/ext2/dir.c
fs/ext3/dir.c
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext4/balloc.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/extents_status.c
fs/ext4/extents_status.h
fs/ext4/file.c
fs/ext4/fsync.c
fs/ext4/ialloc.c
fs/ext4/indirect.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/resize.c
fs/ext4/super.c
fs/f2fs/Kconfig
fs/f2fs/acl.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/node.h
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/f2fs/xattr.h
fs/fat/dir.c
fs/freevxfs/vxfs_lookup.c
fs/fs-writeback.c
fs/fscache/cache.c
fs/fscache/cookie.c
fs/fscache/fsdef.c
fs/fscache/internal.h
fs/fscache/main.c
fs/fscache/netfs.c
fs/fscache/object-list.c
fs/fscache/object.c
fs/fscache/operation.c
fs/fscache/page.c
fs/fuse/dir.c
fs/fuse/file.c
fs/gfs2/aops.c
fs/gfs2/bmap.c
fs/gfs2/dir.c
fs/gfs2/dir.h
fs/gfs2/export.c
fs/gfs2/file.c
fs/gfs2/glops.c
fs/gfs2/inode.c
fs/gfs2/inode.h
fs/gfs2/log.c
fs/gfs2/log.h
fs/gfs2/lops.c
fs/gfs2/lops.h
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/gfs2/quota.c
fs/gfs2/rgrp.c
fs/gfs2/trans.c
fs/hfs/dir.c
fs/hfsplus/dir.c
fs/hostfs/hostfs_kern.c
fs/hpfs/dir.c
fs/hppfs/hppfs.c
fs/internal.h
fs/isofs/dir.c
fs/jbd/transaction.c
fs/jbd2/Kconfig
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/recovery.c
fs/jbd2/revoke.c
fs/jbd2/transaction.c
fs/jffs2/dir.c
fs/jfs/jfs_dtree.c
fs/jfs/jfs_dtree.h
fs/jfs/jfs_metapage.c
fs/jfs/namei.c
fs/libfs.c
fs/logfs/dir.c
fs/logfs/file.c
fs/logfs/segment.c
fs/minix/dir.c
fs/ncpfs/dir.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfsd/nfs4recover.c
fs/nfsd/vfs.c
fs/nilfs2/dir.c
fs/ntfs/aops.c
fs/ntfs/dir.c
fs/ocfs2/aops.c
fs/ocfs2/dir.c
fs/ocfs2/dir.h
fs/ocfs2/file.c
fs/ocfs2/journal.c
fs/omfs/dir.c
fs/openpromfs/inode.c
fs/proc/base.c
fs/proc/fd.c
fs/proc/generic.c
fs/proc/internal.h
fs/proc/namespaces.c
fs/proc/proc_net.c
fs/proc/proc_sysctl.c
fs/proc/root.c
fs/qnx4/dir.c
fs/qnx6/dir.c
fs/read_write.c
fs/readdir.c
fs/reiserfs/dir.c
fs/reiserfs/inode.c
fs/reiserfs/reiserfs.h
fs/reiserfs/xattr.c
fs/romfs/super.c
fs/splice.c
fs/squashfs/dir.c
fs/sysfs/dir.c
fs/sysv/dir.c
fs/ubifs/dir.c
fs/ubifs/file.c
fs/udf/dir.c
fs/ufs/dir.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_dir2.c
fs/xfs/xfs_dir2_block.c
fs/xfs/xfs_dir2_leaf.c
fs/xfs/xfs_dir2_priv.h
fs/xfs/xfs_dir2_sf.c
fs/xfs/xfs_file.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_vnodeops.h
include/acpi/acpi_bus.h
include/acpi/acpi_drivers.h
include/asm-generic/pgtable.h
include/linux/buffer_head.h
include/linux/console.h
include/linux/context_tracking.h
include/linux/f2fs_fs.h
include/linux/fs.h
include/linux/fscache-cache.h
include/linux/if_vlan.h
include/linux/jbd.h
include/linux/jbd2.h
include/linux/jbd_common.h
include/linux/kvm_host.h
include/linux/loop.h [deleted file]
include/linux/mm.h
include/linux/netdevice.h
include/linux/perf_event.h
include/linux/platform_data/usb3503.h
include/linux/preempt.h
include/linux/serial_core.h
include/linux/skbuff.h
include/linux/splice.h
include/linux/tty.h
include/linux/tty_ldisc.h
include/linux/usb.h
include/linux/usb/chipidea.h
include/linux/usb/hcd.h
include/linux/usb/of.h [new file with mode: 0644]
include/linux/usb/otg.h
include/linux/usb/phy.h
include/linux/usb/serial.h
include/linux/usb/tegra_usb_phy.h
include/linux/usb/wusb-wa.h
include/linux/vt_kern.h
include/linux/vtime.h
include/linux/wait.h
include/linux/writeback.h
include/media/v4l2-mem2mem.h
include/trace/events/ext3.h
include/trace/events/ext4.h
include/uapi/linux/Kbuild
include/uapi/linux/serial_core.h
kernel/context_tracking.c
kernel/cpu/idle.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/events/internal.h
kernel/kprobes.c
kernel/ptrace.c
kernel/range.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/time/tick-broadcast.c
kernel/time/tick-sched.c
kernel/wait.c
mm/readahead.c
mm/slab_common.c
mm/truncate.c
net/bluetooth/hci_core.c
net/bluetooth/l2cap_core.c
net/bridge/br_multicast.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/ethtool.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/gre.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/ipv6/ip6_output.c
net/ipv6/ndisc.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/key/af_key.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/util.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_labels.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_nat_sip.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/wireless/nl80211.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_realtek.c
sound/soc/samsung/idma.c
sound/usb/card.c
sound/usb/mixer.c

diff --git a/Documentation/ABI/testing/configfs-usb-gadget b/Documentation/ABI/testing/configfs-usb-gadget
new file mode 100644 (file)
index 0000000..01e769d
--- /dev/null
@@ -0,0 +1,81 @@
+What:          /config/usb-gadget
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               This group contains sub-groups corresponding to created
+               USB gadgets.
+
+What:          /config/usb-gadget/gadget
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+
+               The attributes of a gadget:
+
+               UDC             - bind a gadget to UDC/unbind a gadget;
+                               write UDC's name found in /sys/class/udc/*
+                               to bind a gadget, empty string "" to unbind.
+
+               bDeviceClass    - USB device class code
+               bDeviceSubClass - USB device subclass code
+               bDeviceProtocol - USB device protocol code
+               bMaxPacketSize0 - maximum endpoint 0 packet size
+               bcdDevice       - bcd device release number
+               bcdUSB          - bcd USB specification version number
+               idProduct       - product ID
+               idVendor        - vendor ID
+
+What:          /config/usb-gadget/gadget/configs
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               This group contains a USB gadget's configurations
+
+What:          /config/usb-gadget/gadget/configs/config
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               The attributes of a configuration:
+
+               bmAttributes    - configuration characteristics
+               MaxPower        - maximum power consumption from the bus
+
+What:          /config/usb-gadget/gadget/configs/config/strings
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               This group contains subdirectories for language-specific
+               strings for this configuration.
+
+What:          /config/usb-gadget/gadget/configs/config/strings/language
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               The attributes:
+
+               configuration   - configuration description
+
+
+What:          /config/usb-gadget/gadget/functions
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               This group contains functions available to this USB gadget.
+
+What:          /config/usb-gadget/gadget/strings
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               This group contains subdirectories for language-specific
+               strings for this gadget.
+
+What:          /config/usb-gadget/gadget/strings/language
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               The attributes:
+
+               serialnumber    - gadget's serial number (string)
+               product         - gadget's product description
+               manufacturer    - gadget's manufacturer description
+
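Since every attribute above is a plain configfs file, a gadget can be assembled entirely from user space with mkdir and writes. Below is a minimal C sketch of that flow, assuming configfs is mounted at /config as in the paths above; the gadget name "g1", the ID values and the UDC name "dummy_udc.0" are placeholders, and the configs/functions setup (covered by Documentation/usb/gadget_configfs.txt, added in this merge) is omitted.

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    /* write a single configfs attribute */
    static void write_attr(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (f) {
                    fputs(val, f);
                    fclose(f);
            }
    }

    int main(void)
    {
            mkdir("/config/usb-gadget/g1", 0755);   /* create the gadget */
            write_attr("/config/usb-gadget/g1/idVendor", "0x1d6b");
            write_attr("/config/usb-gadget/g1/idProduct", "0x0104");
            /* configs/, functions/ and the function->config symlinks go here */
            /* bind: write a UDC name found in /sys/class/udc/, "" to unbind */
            write_attr("/config/usb-gadget/g1/UDC", "dummy_udc.0");
            return 0;
    }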
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-acm b/Documentation/ABI/testing/configfs-usb-gadget-acm
new file mode 100644 (file)
index 0000000..5708a56
--- /dev/null
@@ -0,0 +1,8 @@
+What:          /config/usb-gadget/gadget/functions/acm.name
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+
+               This item contains just one readonly attribute: port_num.
+               It contains the port number of the /dev/ttyGS<n> device
+               associated with acm function's instance "name".
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-ecm b/Documentation/ABI/testing/configfs-usb-gadget-ecm
new file mode 100644 (file)
index 0000000..6b9a582
--- /dev/null
@@ -0,0 +1,16 @@
+What:          /config/usb-gadget/gadget/functions/ecm.name
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               The attributes:
+
+               ifname          - network device interface name associated with
+                               this function instance
+               qmult           - queue length multiplier for high and
+                               super speed
+               host_addr       - MAC address of host's end of this
+                               Ethernet over USB link
+               dev_addr        - MAC address of device's end of this
+                               Ethernet over USB link
+
+
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-eem b/Documentation/ABI/testing/configfs-usb-gadget-eem
new file mode 100644 (file)
index 0000000..dbddf36
--- /dev/null
@@ -0,0 +1,14 @@
+What:          /config/usb-gadget/gadget/functions/eem.name
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               The attributes:
+
+               ifname          - network device interface name associated with
+                               this function instance
+               qmult           - queue length multiplier for high and
+                               super speed
+               host_addr       - MAC address of host's end of this
+                               Ethernet over USB link
+               dev_addr        - MAC address of device's end of this
+                               Ethernet over USB link
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-ncm b/Documentation/ABI/testing/configfs-usb-gadget-ncm
new file mode 100644 (file)
index 0000000..bc309f4
--- /dev/null
@@ -0,0 +1,15 @@
+What:          /config/usb-gadget/gadget/functions/ncm.name
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               The attributes:
+
+               ifname          - network device interface name associated with
+                               this function instance
+               qmult           - queue length multiplier for high and
+                               super speed
+               host_addr       - MAC address of host's end of this
+                               Ethernet over USB link
+               dev_addr        - MAC address of device's end of this
+                               Ethernet over USB link
+
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-obex b/Documentation/ABI/testing/configfs-usb-gadget-obex
new file mode 100644 (file)
index 0000000..aaa5c96
--- /dev/null
@@ -0,0 +1,9 @@
+What:          /config/usb-gadget/gadget/functions/obex.name
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+
+               This item contains just one readonly attribute: port_num.
+               It contains the port number of the /dev/ttyGS<n> device
+               associated with obex function's instance "name".
+
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-phonet b/Documentation/ABI/testing/configfs-usb-gadget-phonet
new file mode 100644 (file)
index 0000000..3e3b742
--- /dev/null
@@ -0,0 +1,8 @@
+What:          /config/usb-gadget/gadget/functions/phonet.name
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+
+               This item contains just one readonly attribute: ifname.
+               It contains the network interface name assigned during
+               network device registration.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-rndis b/Documentation/ABI/testing/configfs-usb-gadget-rndis
new file mode 100644 (file)
index 0000000..822e6da
--- /dev/null
@@ -0,0 +1,14 @@
+What:          /config/usb-gadget/gadget/functions/rndis.name
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               The attributes:
+
+               ifname          - network device interface name associated with
+                               this function instance
+               qmult           - queue length multiplier for high and
+                               super speed
+               host_addr       - MAC address of host's end of this
+                               Ethernet over USB link
+               dev_addr        - MAC address of device's end of this
+                               Ethernet over USB link
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-serial b/Documentation/ABI/testing/configfs-usb-gadget-serial
new file mode 100644 (file)
index 0000000..16f130c
--- /dev/null
@@ -0,0 +1,9 @@
+What:          /config/usb-gadget/gadget/functions/gser.name
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+
+               This item contains just one readonly attribute: port_num.
+               It contains the port number of the /dev/ttyGS<n> device
+               associated with gser function's instance "name".
+
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-subset b/Documentation/ABI/testing/configfs-usb-gadget-subset
new file mode 100644 (file)
index 0000000..154ae59
--- /dev/null
@@ -0,0 +1,14 @@
+What:          /config/usb-gadget/gadget/functions/geth.name
+Date:          Jun 2013
+KernelVersion: 3.11
+Description:
+               The attributes:
+
+               ifname          - network device interface name associated with
+                               this function instance
+               qmult           - queue length multiplier for high and
+                               super speed
+               host_addr       - MAC address of host's end of this
+                               Ethernet over USB link
+               dev_addr        - MAC address of device's end of this
+                               Ethernet over USB link
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index f093e59cbe5f017531b74ce45dcc3d130ddfd44d..9759b8c913329cde110754b84438133a4738dcd7 100644 (file)
@@ -236,3 +236,30 @@ Description:
                This attribute is to expose these information to user space.
                The file will read "hotplug", "wired" and "not used" if the
                information is available, and "unknown" otherwise.
+
+What:          /sys/bus/usb/devices/.../power/usb2_lpm_l1_timeout
+Date:          May 2013
+Contact:       Mathias Nyman <mathias.nyman@linux.intel.com>
+Description:
+               USB 2.0 devices may support hardware link power management (LPM)
+               L1 sleep state. The usb2_lpm_l1_timeout attribute allows
+               tuning the timeout for L1 inactivity timer (LPM timer), e.g.
+               needed inactivity time before host requests the device to go to L1 sleep.
+               Useful for power management tuning.
+               Supported values are 0 - 65535 microseconds.
+
+What:          /sys/bus/usb/devices/.../power/usb2_lpm_besl
+Date:          May 2013
+Contact:       Mathias Nyman <mathias.nyman@linux.intel.com>
+Description:
+               USB 2.0 devices that support hardware link power management (LPM)
+               L1 sleep state now use a best effort service latency value (BESL) to
+               indicate the best effort to resumption of service to the device after the
+               initiation of the resume event.
+               If the device does not have a preferred besl value then the host can select
+               one instead. This usb2_lpm_besl attribute allows to tune the host selected besl
+               value in order to tune power saving and service latency.
+
+               Supported values are 0 - 15.
+               More information on how besl values map to microseconds can be found in
+               USB 2.0 ECN Errata for Link Power Management, section 4.10)
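Both attributes are ordinary sysfs files, so the LPM behaviour can be tuned from user space with a simple write, as in the hedged C sketch below; the device path is a placeholder for a real device directory under /sys/bus/usb/devices/.

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical device; pick the port/device actually being tuned */
            FILE *f = fopen("/sys/bus/usb/devices/2-1/power/usb2_lpm_l1_timeout", "w");

            if (!f)
                    return 1;
            fprintf(f, "%d\n", 512);  /* L1 inactivity timeout in microseconds (0 - 65535) */
            fclose(f);
            return 0;
    }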
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
index 25b1e751b77714a5ecbf05f56b3d0f7784cee386..5977e287532560fb0348af3c31ed30c9fe4d42dd 100644 (file)
@@ -36,3 +36,22 @@ Description:
 
                 Refer to [ECMA-368] section 10.3.1.1 for the value to
                 use.
+
+What:           /sys/class/uwb_rc/uwbN/wusbhc/wusb_dnts
+Date:           June 2013
+KernelVersion:  3.11
+Contact:        Thomas Pugliese <thomas.pugliese@gmail.com>
+Description:
+                The device notification time slot (DNTS) count and interval in
+                milliseconds that the WUSB host should use.  This controls how
+                often the devices will have the opportunity to send
+                notifications to the host.
+
+What:           /sys/class/uwb_rc/uwbN/wusbhc/wusb_retry_count
+Date:           June 2013
+KernelVersion:  3.11
+Contact:        Thomas Pugliese <thomas.pugliese@gmail.com>
+Description:
+                The number of retries that the WUSB host should attempt
+                before reporting an error for a bus transaction.  The range of
+                valid values is [0..15], where 0 indicates infinite retries.
diff --git a/Documentation/DocBook/media/v4l/dev-codec.xml b/Documentation/DocBook/media/v4l/dev-codec.xml
index dca0ecd54dc699fa92e1cab272e665423712cd45..ff44c16fc0801090b8020c28e7eb6bda8ecc8ea1 100644 (file)
@@ -1,18 +1,27 @@
   <title>Codec Interface</title>
 
-  <note>
-    <title>Suspended</title>
+  <para>A V4L2 codec can compress, decompress, transform, or otherwise
+convert video data from one format into another format, in memory. Typically
+such devices are memory-to-memory devices (i.e. devices with the
+<constant>V4L2_CAP_VIDEO_M2M</constant> or <constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant>
+capability set).
+</para>
 
-    <para>This interface has been be suspended from the V4L2 API
-implemented in Linux 2.6 until we have more experience with codec
-device interfaces.</para>
-  </note>
+  <para>A memory-to-memory video node acts just like a normal video node, but it
+supports both output (sending frames from memory to the codec hardware) and
+capture (receiving the processed frames from the codec hardware into memory)
+stream I/O. An application will have to setup the stream
+I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output
+to start the codec.</para>
 
-  <para>A V4L2 codec can compress, decompress, transform, or otherwise
-convert video data from one format into another format, in memory.
-Applications send data to be converted to the driver through a
-&func-write; call, and receive the converted data through a
-&func-read; call. For efficiency a driver may also support streaming
-I/O.</para>
+  <para>Video compression codecs use the MPEG controls to setup their codec parameters
+(note that the MPEG controls actually support many more codecs than just MPEG).
+See <xref linkend="mpeg-controls"></xref>.</para>
 
-  <para>[to do]</para>
+  <para>Memory-to-memory devices can often be used as a shared resource: you can
+open the video node multiple times, each application setting up their own codec properties
+that are local to the file handle, and each can use it independently from the others.
+The driver will arbitrate access to the codec and reprogram it whenever another file
+handler gets access. This is different from the usual video node behavior where the video properties
+are global to the device (i.e. changing something through one file handle is visible
+through another file handle).</para>
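As the new text describes, a mem2mem codec is driven through a single video node carrying both an OUTPUT and a CAPTURE queue, and streaming has to be started on both sides. The fragment below sketches only that final step; format negotiation and buffer handling (VIDIOC_S_FMT, VIDIOC_REQBUFS, VIDIOC_QBUF) are left out and /dev/video0 is a placeholder node name.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            int fd = open("/dev/video0", O_RDWR);
            enum v4l2_buf_type out = V4L2_BUF_TYPE_VIDEO_OUTPUT;   /* frames from memory to the codec */
            enum v4l2_buf_type cap = V4L2_BUF_TYPE_VIDEO_CAPTURE;  /* processed frames back to memory */

            if (fd < 0)
                    return 1;
            /* ... set formats and queue buffers on both queues here ... */
            ioctl(fd, VIDIOC_STREAMON, &out);
            ioctl(fd, VIDIOC_STREAMON, &cap);
            return 0;
    }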
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index bfc93cdcf69644ca9d4e42d0652881330c747e3e..bfe823dd0f31cf7c6adecb79b5b8a888f434b888 100644 (file)
@@ -493,7 +493,7 @@ and discussions on the V4L mailing list.</revremark>
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.9</subtitle>
+ <subtitle>Revision 3.10</subtitle>
 
   <chapter id="common">
     &sub-common;
diff --git a/Documentation/console/console.txt b/Documentation/console/console.txt
index 926cf1b5e63ebe7d3d597168c5c3042174c5bcb7..f93810d599adbe0defe3bdb975466e900eef9f49 100644 (file)
@@ -12,20 +12,20 @@ The second type has to be explicitly loaded and unloaded. This will be called
 any time with each driver sharing the console with other drivers including
 the system driver. However, modular drivers cannot take over the console
 that is currently occupied by another modular driver. (Exception: Drivers that
-call take_over_console() will succeed in the takeover regardless of the type
+call do_take_over_console() will succeed in the takeover regardless of the type
 of driver occupying the consoles.) They can only take over the console that is
 occupied by the system driver. In the same token, if the modular driver is
 released by the console, the system driver will take over.
 
 Modular drivers, from the programmer's point of view, has to call:
 
-        take_over_console() - load and bind driver to console layer
-        give_up_console() - unbind and unload driver
+        do_take_over_console() - load and bind driver to console layer
+        give_up_console() - unload driver; it will only work if the driver is fully unbound
 
 In newer kernels, the following are also available:
 
-        register_con_driver()
-        unregister_con_driver()
+        do_register_con_driver()
+        do_unregister_con_driver()
 
 If sysfs is enabled, the contents of /sys/class/vtconsole can be
 examined. This shows the console backends currently registered by the
@@ -94,12 +94,12 @@ for more details).
 Notes for developers:
 =====================
 
-take_over_console() is now broken up into:
+do_take_over_console() is now broken up into:
 
-     register_con_driver()
-     bind_con_driver() - private function
+     do_register_con_driver()
+     do_bind_con_driver() - private function
 
-give_up_console() is a wrapper to unregister_con_driver(), and a driver must
+give_up_console() is a wrapper to do_unregister_con_driver(), and a driver must
 be fully unbound for this call to succeed. con_is_bound() will check if the
 driver is bound or not.
 
@@ -109,10 +109,10 @@ Guidelines for console driver writers:
 In order for binding to and unbinding from the console to properly work,
 console drivers must follow these guidelines:
 
-1. All drivers, except system drivers, must call either register_con_driver()
-   or take_over_console(). register_con_driver() will just add the driver to
+1. All drivers, except system drivers, must call either do_register_con_driver()
+   or do_take_over_console(). do_register_con_driver() will just add the driver to
    the console's internal list. It won't take over the
-   console. take_over_console(), as it name implies, will also take over (or
+   console. do_take_over_console(), as its name implies, will also take over (or
    bind to) the console.
 
 2. All resources allocated during con->con_init() must be released in
@@ -128,10 +128,10 @@ console drivers must follow these guidelines:
    rebind the driver to the console arrives.
 
 4. Upon exit of the driver, ensure that the driver is totally unbound. If the
-   condition is satisfied, then the driver must call unregister_con_driver()
+   condition is satisfied, then the driver must call do_unregister_con_driver()
    or give_up_console().
 
-5. unregister_con_driver() can also be called on conditions which make it
+5. do_unregister_con_driver() can also be called on conditions which make it
    impossible for the driver to service console requests.  This can happen
    with the framebuffer console that suddenly lost all of its drivers.
 
diff --git a/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt b/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
index 3f62adfb3e0b4102dbeb888497a5f58dc90b5295..de9f6b78ee515ca3893a3ac954f2a15d516c7a76 100644 (file)
@@ -2,7 +2,7 @@ Exynos4x12/Exynos5 SoC series camera host interface (FIMC-LITE)
 
 Required properties:
 
-- compatible   : should be "samsung,exynos4212-fimc" for Exynos4212 and
+- compatible   : should be "samsung,exynos4212-fimc-lite" for Exynos4212 and
                  Exynos4412 SoCs;
 - reg          : physical base address and size of the device memory mapped
                  registers;
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
index b462d0c548237fae605cfe63a689cc216f8f945e..c662eb36be2926e60d97c1cfd8489d0da839ba89 100644 (file)
@@ -8,6 +8,8 @@ Required properties:
 Optional properties:
 - fsl,uart-has-rtscts : Indicate the uart has rts and cts
 - fsl,irda-mode : Indicate the uart supports irda mode
+- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
+                  in DCE mode by default.
 
 Example:
 
@@ -16,4 +18,5 @@ serial@73fbc000 {
        reg = <0x73fbc000 0x4000>;
        interrupts = <31>;
        fsl,uart-has-rtscts;
+       fsl,dte-mode;
 };
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-lpuart.txt b/Documentation/devicetree/bindings/tty/serial/fsl-lpuart.txt
new file mode 100644 (file)
index 0000000..6fd1dd1
--- /dev/null
@@ -0,0 +1,14 @@
+* Freescale low power universal asynchronous receiver/transmitter (lpuart)
+
+Required properties:
+- compatible : Should be "fsl,<soc>-lpuart"
+- reg : Address and length of the register set for the device
+- interrupts : Should contain uart interrupt
+
+Example:
+
+uart0: serial@40027000 {
+              compatible = "fsl,vf610-lpuart";
+              reg = <0x40027000 0x1000>;
+              interrupts = <0 61 0x00>;
+       };
diff --git a/Documentation/devicetree/bindings/usb/ci13xxx-imx.txt b/Documentation/devicetree/bindings/usb/ci13xxx-imx.txt
index 1c04a4c9515f981e570fe778b748bfeb3d5b3e92..b4b5b7906c88b0d2e06111068c9e3279e14b07a8 100644 (file)
@@ -5,6 +5,12 @@ Required properties:
 - reg: Should contain registers location and length
 - interrupts: Should contain controller interrupt
 
+Recommended properties:
+- phy_type: the type of the phy connected to the core. Should be one
+  of "utmi", "utmi_wide", "ulpi", "serial" or "hsic". Without this
+  property the PORTSC register won't be touched
+- dr_mode: One of "host", "peripheral" or "otg". Defaults to "otg"
+
 Optional properties:
 - fsl,usbphy: phandler of usb phy that connects to the only one port
 - fsl,usbmisc: phandler of non-core register device, with one argument
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt b/Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt
index 34c952883276c32d46cc7643f4e90a482707102b..df0933043a5be46f705a450e3956f92d8ef77600 100644 (file)
@@ -6,27 +6,10 @@ Practice : Universal Serial Bus" with the following modifications
 and additions :
 
 Required properties :
- - compatible : Should be "nvidia,tegra20-ehci" for USB controllers
-   used in host mode.
- - phy_type : Should be one of "ulpi" or "utmi".
- - nvidia,vbus-gpio : If present, specifies a gpio that needs to be
-   activated for the bus to be powered.
- - nvidia,phy : phandle of the PHY instance, the controller is connected to.
-
-Required properties for phy_type == ulpi:
-  - nvidia,phy-reset-gpio : The GPIO used to reset the PHY.
+ - compatible : Should be "nvidia,tegra20-ehci".
+ - nvidia,phy : phandle of the PHY that the controller is connected to.
+ - clocks : Contains a single entry which defines the USB controller's clock.
 
 Optional properties:
-  - dr_mode : dual role mode. Indicates the working mode for
-   nvidia,tegra20-ehci compatible controllers.  Can be "host", "peripheral",
-   or "otg".  Default to "host" if not defined for backward compatibility.
-      host means this is a host controller
-      peripheral means it is device controller
-      otg means it can operate as either ("on the go")
-  - nvidia,has-legacy-mode : boolean indicates whether this controller can
-    operate in legacy mode (as APX 2500 / 2600). In legacy mode some
-    registers are accessed through the APB_MISC base address instead of
-    the USB controller. Since this is a legacy issue it probably does not
-    warrant a compatible string of its own.
-  - nvidia,needs-double-reset : boolean is to be set for some of the Tegra2
-    USB ports, which need reset twice due to hardware issues.
+ - nvidia,needs-double-reset : boolean is to be set for some of the Tegra20
+   USB ports, which need reset twice due to hardware issues.
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt b/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt
index 6bdaba2f0aa19a5e3c4b6a207a87123f44065382..c4c9e9e664aac3461a378d2ab249ba0ca662b2b2 100644 (file)
@@ -4,14 +4,49 @@ The device node for Tegra SOC USB PHY:
 
 Required properties :
  - compatible : Should be "nvidia,tegra20-usb-phy".
- - reg : Address and length of the register set for the USB PHY interface.
- - phy_type : Should be one of "ulpi" or "utmi".
+ - reg : Defines the following set of registers, in the order listed:
+   - The PHY's own register set.
+     Always present.
+   - The register set of the PHY containing the UTMI pad control registers.
+     Present if-and-only-if phy_type == utmi.
+ - phy_type : Should be one of "utmi", "ulpi" or "hsic".
+ - clocks : Defines the clocks listed in the clock-names property.
+ - clock-names : The following clock names must be present:
+   - reg: The clock needed to access the PHY's own registers. This is the
+     associated EHCI controller's clock. Always present.
+   - pll_u: PLL_U. Always present.
+   - timer: The timeout clock (clk_m). Present if phy_type == utmi.
+   - utmi-pads: The clock needed to access the UTMI pad control registers.
+     Present if phy_type == utmi.
+   - ulpi-link: The clock Tegra provides to the ULPI PHY (cdev2).
+     Present if phy_type == ulpi, and ULPI link mode is in use.
 
 Required properties for phy_type == ulpi:
   - nvidia,phy-reset-gpio : The GPIO used to reset the PHY.
 
+Required PHY timing params for utmi phy:
+  - nvidia,hssync-start-delay : Number of 480 Mhz clock cycles to wait before
+    start of sync launches RxActive
+  - nvidia,elastic-limit : Variable FIFO Depth of elastic input store
+  - nvidia,idle-wait-delay : Number of 480 Mhz clock cycles of idle to wait
+    before declare IDLE.
+  - nvidia,term-range-adj : Range adjustment on terminations
+  - nvidia,xcvr-setup : HS driver output control
+  - nvidia,xcvr-lsfslew : LS falling slew rate control.
+  - nvidia,xcvr-lsrslew :  LS rising slew rate control.
+
 Optional properties:
   - nvidia,has-legacy-mode : boolean indicates whether this controller can
     operate in legacy mode (as APX 2500 / 2600). In legacy mode some
     registers are accessed through the APB_MISC base address instead of
-    the USB controller.
\ No newline at end of file
+    the USB controller.
+  - nvidia,is-wired : boolean. Indicates whether we can do certain kind of power
+    optimizations for the devices that are always connected. e.g. modem.
+  - dr_mode : dual role mode. Indicates the working mode for the PHY. Can be
+    "host", "peripheral", or "otg". Defaults to "host" if not defined.
+      host means this is a host controller
+      peripheral means it is device controller
+      otg means it can operate as either ("on the go")
+
+Required properties for dr_mode == otg:
+  - vbus-supply: regulator for VBUS
diff --git a/Documentation/devicetree/bindings/usb/usb3503.txt b/Documentation/devicetree/bindings/usb/usb3503.txt
index 6813a715fc7dc8ff895036869b82a7a7ff1d09a0..8c5be48b43c82189f192e09126c3c63cf9c4db53 100644 (file)
@@ -4,6 +4,10 @@ Required properties:
 - compatible: Should be "smsc,usb3503".
 - reg: Specifies the i2c slave address, it should be 0x08.
 - connect-gpios: Should specify GPIO for connect.
+- disabled-ports: Should specify the ports unused.
+       '1' or '2' or '3' are availe for this property to describe the port
+       '1' or '2' or '3' are available for this property to describe the port
+       number. 1~3 property values are possible to be described.
 - intn-gpios: Should specify GPIO for interrupt.
 - reset-gpios: Should specify GPIO for reset.
 - initial-mode: Should specify initial mode.
@@ -14,6 +18,7 @@ Examples:
                compatible = "smsc,usb3503";
                reg = <0x08>;
                connect-gpios = <&gpx3 0 1>;
+               disabled-ports = <2 3>;
                intn-gpios = <&gpx3 4 1>;
                reset-gpios = <&gpx3 5 1>;
                initial-mode = <1>;
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 0706d32a61e6fc0fafbbe9a975d095d5e37e95f7..9858f337529c1c6edec3444cf0ac145ec331ed07 100644 (file)
@@ -189,7 +189,7 @@ prototypes:
                                loff_t pos, unsigned len, unsigned copied,
                                struct page *page, void *fsdata);
        sector_t (*bmap)(struct address_space *, sector_t);
-       int (*invalidatepage) (struct page *, unsigned long);
+       void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
        int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -310,8 +310,8 @@ filesystems and by the swapper. The latter will eventually go away.  Please,
 keep it that way and don't breed new callers.
 
        ->invalidatepage() is called when the filesystem must attempt to drop
-some or all of the buffers from the page when it is being truncated.  It
-returns zero on success.  If ->invalidatepage is zero, the kernel uses
+some or all of the buffers from the page when it is being truncated. It
+returns zero on success. If ->invalidatepage is zero, the kernel uses
 block_invalidatepage() instead.
 
        ->releasepage() is called when the kernel is about to try to drop the
@@ -414,7 +414,7 @@ prototypes:
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
-       int (*readdir) (struct file *, void *, filldir_t);
+       int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
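For reference, the new ->iterate() hook replaces ->readdir(): entries are returned through a struct dir_context and the dir_emit()/dir_emit_dots() helpers added by the same series, instead of a filldir callback. A hedged sketch for a hypothetical "myfs" directory:

    #include <linux/fs.h>

    static int myfs_iterate(struct file *file, struct dir_context *ctx)
    {
            if (!dir_emit_dots(file, ctx))
                    return 0;
            if (ctx->pos == 2) {            /* one made-up entry after . and .. */
                    if (!dir_emit(ctx, "hello", 5, 42, DT_REG))
                            return 0;
                    ctx->pos++;
            }
            return 0;
    }

    const struct file_operations myfs_dir_operations = {
            .llseek  = default_llseek,
            .read    = generic_read_dir,
            .iterate = myfs_iterate,
    };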
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index bd3c56c67380b1cf27d3ef82f59747e76a2ad07f..b91e2f26b672451e687b73fb42f18bb2ebd21821 100644 (file)
@@ -98,8 +98,13 @@ Cleaning Overhead
 MOUNT OPTIONS
 ================================================================================
 
-background_gc_off      Turn off cleaning operations, namely garbage collection,
-                      triggered in background when I/O subsystem is idle.
+background_gc=%s       Turn on/off cleaning operations, namely garbage
+                       collection, triggered in background when I/O subsystem is
+                       idle. If background_gc=on, it will turn on the garbage
+                       collection and if background_gc=off, garbage collection
+                       will be turned off.
+                       The default value for this option is on, so garbage
+                       collection is enabled by default.
 disable_roll_forward   Disable the roll-forward recovery routine
 discard                Issue discard/TRIM commands when a segment is cleaned.
 no_heap                Disable heap-style segment allocation which finds free
index 4db22f6491e026fee3ea01da970404c601a60684..206a1bdc7321cf59a1ccc8829d09a9dccd9bcded 100644 (file)
@@ -445,3 +445,9 @@ object doesn't exist.  It's remote/distributed ones that might care...
 [mandatory]
        FS_REVAL_DOT is gone; if you used to have it, add ->d_weak_revalidate()
 in your dentry operations instead.
+--
+[mandatory]
+       vfs_readdir() is gone; switch to iterate_dir() instead
+--
+[mandatory]
+       ->readdir() is gone now; switch to ->iterate()
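For readers porting out-of-tree code, here is a rough sketch of what the
converted callback ends up looking like. Only struct dir_context, dir_emit(),
dir_emit_dots() and file_inode() are real kernel interfaces here; the myfs_*
names are hypothetical placeholders for a filesystem's own directory helpers.

static int myfs_iterate(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file_inode(file);

	/* dir_emit_dots() emits "." and ".." and advances ctx->pos */
	if (!dir_emit_dots(file, ctx))
		return 0;

	/* ctx->pos replaces the old filp->f_pos bookkeeping */
	while (ctx->pos - 2 < myfs_nr_entries(dir)) {
		const struct myfs_dirent *de = myfs_entry_at(dir, ctx->pos - 2);

		/* dir_emit() returns false when the user buffer is full;
		   return without advancing ctx->pos so we resume here */
		if (!dir_emit(ctx, de->name, de->name_len, de->ino, DT_UNKNOWN))
			return 0;
		ctx->pos++;
	}
	return 0;
}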
index bc4b06b3160a3a6842d6ac104eb1d9ec7de067ac..e6bd1ffd821e28fe05f8046836074b2f6976b6d8 100644 (file)
@@ -549,7 +549,7 @@ struct address_space_operations
 -------------------------------
 
 This describes how the VFS can manipulate mapping of a file to page cache in
-your filesystem. As of kernel 2.6.22, the following members are defined:
+your filesystem. The following members are defined:
 
 struct address_space_operations {
        int (*writepage)(struct page *page, struct writeback_control *wbc);
@@ -566,7 +566,7 @@ struct address_space_operations {
                                loff_t pos, unsigned len, unsigned copied,
                                struct page *page, void *fsdata);
        sector_t (*bmap)(struct address_space *, sector_t);
-       int (*invalidatepage) (struct page *, unsigned long);
+       void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
        ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -685,14 +685,14 @@ struct address_space_operations {
   invalidatepage: If a page has PagePrivate set, then invalidatepage
         will be called when part or all of the page is to be removed
        from the address space.  This generally corresponds to either a
-       truncation or a complete invalidation of the address space
-       (in the latter case 'offset' will always be 0).
-       Any private data associated with the page should be updated
-       to reflect this truncation.  If offset is 0, then
-       the private data should be released, because the page
-       must be able to be completely discarded.  This may be done by
-        calling the ->releasepage function, but in this case the
-        release MUST succeed.
+       truncation, punch hole, or a complete invalidation of the address
+       space (in the latter case 'offset' will always be 0 and 'length'
+       will be PAGE_CACHE_SIZE). Any private data associated with the page
+       should be updated to reflect this truncation.  If offset is 0 and
+       length is PAGE_CACHE_SIZE, then the private data should be released,
+       because the page must be able to be completely discarded.  This may
+       be done by calling the ->releasepage function, but in this case the
+       release MUST succeed.
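As a minimal sketch only (not taken from the patch or from any in-tree
filesystem), an implementation that keeps nothing but buffer heads attached to
its pages could simply forward the new arguments, since block_invalidatepage()
was updated to take the same (offset, length) pair in this series:

static void myfs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	/* offset == 0 && length == PAGE_CACHE_SIZE means the whole page is
	   going away, so every buffer attached to it must be dropped */
	block_invalidatepage(page, offset, length);
}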
 
   releasepage: releasepage is called on PagePrivate pages to indicate
         that the page should be freed if possible.  ->releasepage
@@ -777,7 +777,7 @@ struct file_operations {
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
-       int (*readdir) (struct file *, void *, filldir_t);
+       int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
@@ -815,7 +815,7 @@ otherwise noted.
 
   aio_write: called by io_submit(2) and other asynchronous I/O operations
 
-  readdir: called when the VFS needs to read the directory contents
+  iterate: called when the VFS needs to read the directory contents
 
   poll: called by the VFS when a process wants to check if there is
        activity on this file and (optionally) go to sleep until there
index f98ca633b5282eb838ae1b04f9497e4a4495f198..3458d6343e01de0e66f0d3f2e09efc61bab3b886 100644 (file)
@@ -420,10 +420,10 @@ tcp_synack_retries - INTEGER
        for a passive TCP connection will happen after 63seconds.
 
 tcp_syncookies - BOOLEAN
-       Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
+       Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
        Send out syncookies when the syn backlog queue of a socket
        overflows. This is to prevent against the common 'SYN flood attack'
-       Default: FALSE
+       Default: 1
 
        Note, that syncookies is fallback facility.
        It MUST NOT be used to help highly loaded servers to stand
index f7b0c7dc25ef283cc3fe689a4c6f4124c1114e70..1f1b22fbd73935d8677fa959ecb8e8d1b7b2dd3c 100644 (file)
@@ -16,8 +16,6 @@ serial-rs485.txt
        - info about RS485 structures and support in the kernel.
 specialix.txt
        - info on hardware/driver for specialix IO8+ multiport serial card.
-stallion.txt
-       - info on using the Stallion multiport serial driver.
 sx.txt
        - info on the Specialix SX/SI multiport serial driver.
 tty.txt
diff --git a/Documentation/serial/stallion.txt b/Documentation/serial/stallion.txt
deleted file mode 100644 (file)
index 4d798c0..0000000
+++ /dev/null
@@ -1,392 +0,0 @@
-* NOTE - This is an unmaintained driver.  Lantronix, which bought Stallion
-technologies, is not active in driver maintenance, and they have no information
-on when or if they will have a 2.6 driver.
-
-James Nelson <james4765@gmail.com> - 12-12-2004
-
-Stallion Multiport Serial Driver Readme
----------------------------------------
-
-Copyright (C) 1994-1999,  Stallion Technologies.
-
-Version:   5.5.1
-Date:      28MAR99
-
-
-
-1. INTRODUCTION
-
-There are two drivers that work with the different families of Stallion
-multiport serial boards. One is for the Stallion smart boards - that is
-EasyIO, EasyConnection 8/32 and EasyConnection 8/64-PCI, the other for
-the true Stallion intelligent multiport boards - EasyConnection 8/64
-(ISA, EISA), EasyConnection/RA-PCI, ONboard and Brumby.
-
-If you are using any of the Stallion intelligent multiport boards (Brumby,
-ONboard, EasyConnection 8/64 (ISA, EISA), EasyConnection/RA-PCI) with
-Linux you will need to get the driver utility package.  This contains a
-firmware loader and the firmware images necessary to make the devices operate.
-
-The Stallion Technologies ftp site, ftp.stallion.com, will always have
-the latest version of the driver utility package.
-
-ftp://ftp.stallion.com/drivers/ata5/Linux/ata-linux-550.tar.gz
-
-As of the printing of this document the latest version of the driver
-utility package is 5.5.0. If a later version is now available then you
-should use the latest version.
-
-If you are using the EasyIO, EasyConnection 8/32 or EasyConnection 8/64-PCI
-boards then you don't need this package, although it does have a serial stats
-display program.
-
-If you require DIP switch settings, or EISA configuration files, or any
-other information related to Stallion boards then have a look at Stallion's
-web pages at http://www.stallion.com.
-
-
-
-2. INSTALLATION
-
-The drivers can be used as loadable modules or compiled into the kernel.
-You can choose which when doing a "config" on the kernel.
-
-All ISA, and EISA boards that you want to use need to be configured into
-the driver(s). All PCI boards will be automatically detected when you load
-the driver - so they do not need to be entered into the driver(s)
-configuration structure. Note that kernel PCI support is required to use PCI
-boards.
-
-There are two methods of configuring ISA and EISA boards into the drivers.
-If using the driver as a loadable module then the simplest method is to pass
-the driver configuration as module arguments. The other method is to modify
-the driver source to add configuration lines for each board in use.
-
-If you have pre-built Stallion driver modules then the module argument
-configuration method should be used. A lot of Linux distributions come with
-pre-built driver modules in /lib/modules/X.Y.Z/misc for the kernel in use.
-That makes things pretty simple to get going.
-
-
-2.1 MODULE DRIVER CONFIGURATION:
-
-The simplest configuration for modules is to use the module load arguments
-to configure any ISA or EISA boards. PCI boards are automatically
-detected, so do not need any additional configuration at all.
-
-If using EasyIO, EasyConnection 8/32 ISA, or EasyConnection 8/63-PCI
-boards then use the "stallion" driver module, Otherwise if you are using
-an EasyConnection 8/64 ISA or EISA, EasyConnection/RA-PCI, ONboard,
-Brumby or original Stallion board then use the "istallion" driver module.
-
-Typically to load up the smart board driver use:
-
-    modprobe stallion
-
-This will load the EasyIO and EasyConnection 8/32 driver. It will output a
-message to say that it loaded and print the driver version number. It will
-also print out whether it found the configured boards or not. These messages
-may not appear on the console, but typically are always logged to
-/var/adm/messages or /var/log/syslog files - depending on how the klogd and
-syslogd daemons are setup on your system.
-
-To load the intelligent board driver use:
-
-    modprobe istallion
-
-It will output similar messages to the smart board driver.
-
-If not using an auto-detectable board type (that is a PCI board) then you
-will also need to supply command line arguments to the modprobe command
-when loading the driver. The general form of the configuration argument is
-
-    board?=<name>[,<ioaddr>[,<addr>][,<irq>]]
-
-where:
-
-    board?  -- specifies the arbitrary board number of this board,
-               can be in the range 0 to 3.
-
-    name    -- textual name of this board. The board name is the common
-               board name, or any "shortened" version of that. The board
-               type number may also be used here.
-
-    ioaddr  -- specifies the I/O address of this board. This argument is
-               optional, but should generally be specified.
-
-    addr    -- optional second address argument. Some board types require
-               a second I/O address, some require a memory address. The
-               exact meaning of this argument depends on the board type.
-
-    irq     -- optional IRQ line used by this board.
-
-Up to 4 board configuration arguments can be specified on the load line.
-Here is some examples:
-
-    modprobe stallion board0=easyio,0x2a0,5
-
-This configures an EasyIO board as board 0 at I/O address 0x2a0 and IRQ 5.
-
-    modprobe istallion board3=ec8/64,0x2c0,0xcc000
-
-This configures an EasyConnection 8/64 ISA as board 3 at I/O address 0x2c0 at
-memory address 0xcc000.
-
-    modprobe stallion board1=ec8/32-at,0x2a0,0x280,10
-
-This configures an EasyConnection 8/32 ISA board at primary I/O address 0x2a0,
-secondary address 0x280 and IRQ 10.
-
-You will probably want to enter this module load and configuration information
-into your system startup scripts so that the drivers are loaded and configured
-on each system boot. Typically configuration files are put in the
-/etc/modprobe.d/ directory.
-
-
-2.2 STATIC DRIVER CONFIGURATION:
-
-For static driver configuration you need to modify the driver source code.
-Entering ISA and EISA boards into the driver(s) configuration structure
-involves editing the driver(s) source file. It's pretty easy if you follow
-the instructions below. Both drivers can support up to 4 boards. The smart
-card driver (the stallion.c driver) supports any combination of EasyIO and
-EasyConnection 8/32 boards (up to a total of 4). The intelligent driver
-supports any combination of ONboards, Brumbys, Stallions and EasyConnection
-8/64 (ISA and EISA) boards (up to a total of 4).
-
-To set up the driver(s) for the boards that you want to use you need to
-edit the appropriate driver file and add configuration entries.
-
-If using EasyIO or EasyConnection 8/32 ISA boards,
-   In drivers/char/stallion.c:
-      - find the definition of the stl_brdconf array (of structures)
-        near the top of the file
-      - modify this to match the boards you are going to install
-       (the comments before this structure should help)
-      - save and exit
-
-If using ONboard, Brumby, Stallion or EasyConnection 8/64 (ISA or EISA)
-boards,
-   In drivers/char/istallion.c:
-      - find the definition of the stli_brdconf array (of structures)
-        near the top of the file
-      - modify this to match the boards you are going to install
-       (the comments before this structure should help)
-      - save and exit
-
-Once you have set up the board configurations then you are ready to build
-the kernel or modules.
-
-When the new kernel is booted, or the loadable module loaded then the
-driver will emit some kernel trace messages about whether the configured
-boards were detected or not. Depending on how your system logger is set
-up these may come out on the console, or just be logged to
-/var/adm/messages or /var/log/syslog. You should check the messages to
-confirm that all is well.
-
-
-2.3 SHARING INTERRUPTS
-
-It is possible to share interrupts between multiple EasyIO and
-EasyConnection 8/32 boards in an EISA system. To do this you must be using
-static driver configuration, modifying the driver source code to add driver
-configuration. Then a couple of extra things are required:
-
-1. When entering the board resources into the stallion.c file you need to
-   mark the boards as using level triggered interrupts. Do this by replacing
-   the "0" entry at field position 6 (the last field) in the board
-   configuration structure with a "1". (This is the structure that defines
-   the board type, I/O locations, etc. for each board). All boards that are
-   sharing an interrupt must be set this way, and each board should have the
-   same interrupt number specified here as well. Now build the module or
-   kernel as you would normally.
-
-2. When physically installing the boards into the system you must enter
-   the system EISA configuration utility. You will need to install the EISA
-   configuration files for *all* the EasyIO and EasyConnection 8/32 boards
-   that are sharing interrupts. The Stallion EasyIO and EasyConnection 8/32
-   EISA configuration files required are supplied by Stallion Technologies
-   on the EASY Utilities floppy diskette (usually supplied in the box with
-   the board when purchased. If not, you can pick it up from Stallion's FTP
-   site, ftp.stallion.com). You will need to edit the board resources to
-   choose level triggered interrupts, and make sure to set each board's
-   interrupt to the same IRQ number.
-
-You must complete both the above steps for this to work. When you reboot
-or load the driver your EasyIO and EasyConnection 8/32 boards will be
-sharing interrupts.
-
-
-2.4 USING HIGH SHARED MEMORY
-
-The EasyConnection 8/64-EI, ONboard and Stallion boards are capable of
-using shared memory addresses above the usual 640K - 1Mb range. The ONboard
-ISA and the Stallion boards can be programmed to use memory addresses up to
-16Mb (the ISA bus addressing limit), and the EasyConnection 8/64-EI and
-ONboard/E can be programmed for memory addresses up to 4Gb (the EISA bus
-addressing limit).
-
-The higher than 1Mb memory addresses are fully supported by this driver.
-Just enter the address as you normally would for a lower than 1Mb address
-(in the driver's board configuration structure).
-
-
-
-2.5 TROUBLE SHOOTING
-
-If a board is not found by the driver but is actually in the system then the
-most likely problem is that the I/O address is wrong. Change the module load
-argument for the loadable module form. Or change it in the driver stallion.c
-or istallion.c configuration structure and rebuild the kernel or modules, or
-change it on the board.
-
-On EasyIO and EasyConnection 8/32 boards the IRQ is software programmable, so
-if there is a conflict you may need to change the IRQ used for a board. There
-are no interrupts to worry about for ONboard, Brumby or EasyConnection 8/64
-(ISA and EISA) boards. The memory region on EasyConnection 8/64 and
-ONboard boards is software programmable, but not on the Brumby boards.
-
-
-
-3. USING THE DRIVERS
-
-3.1 INTELLIGENT DRIVER OPERATION
-
-The intelligent boards also need to have their "firmware" code downloaded
-to them. This is done via a user level application supplied in the driver
-utility package called "stlload". Compile this program wherever you dropped
-the package files, by typing "make". In its simplest form you can then type
-
-    ./stlload -i cdk.sys
-
-in this directory and that will download board 0 (assuming board 0 is an
-EasyConnection 8/64 or EasyConnection/RA board). To download to an
-ONboard, Brumby or Stallion do:
-
-    ./stlload -i 2681.sys
-
-Normally you would want all boards to be downloaded as part of the standard
-system startup. To achieve this, add one of the lines above into the
-/etc/rc.d/rc.S or /etc/rc.d/rc.serial file. To download each board just add
-the "-b <brd-number>" option to the line. You will need to download code for
-every board. You should probably move the stlload program into a system
-directory, such as /usr/sbin. Also, the default location of the cdk.sys image
-file in the stlload down-loader is /usr/lib/stallion. Create that directory
-and put the cdk.sys and 2681.sys files in it. (It's a convenient place to put
-them anyway). As an example your /etc/rc.d/rc.S file might have the
-following lines added to it (if you had 3 boards):
-
-    /usr/sbin/stlload -b 0 -i /usr/lib/stallion/cdk.sys
-    /usr/sbin/stlload -b 1 -i /usr/lib/stallion/2681.sys
-    /usr/sbin/stlload -b 2 -i /usr/lib/stallion/2681.sys
-
-The image files cdk.sys and 2681.sys are specific to the board types. The
-cdk.sys will only function correctly on an EasyConnection 8/64 board. Similarly
-the 2681.sys image fill only operate on ONboard, Brumby and Stallion boards.
-If you load the wrong image file into a board it will fail to start up, and
-of course the ports will not be operational!
-
-If you are using the modularized version of the driver you might want to put
-the modprobe calls in the startup script as well (before the download lines
-obviously).
-
-
-3.2 USING THE SERIAL PORTS
-
-Once the driver is installed you will need to setup some device nodes to
-access the serial ports. The simplest method is to use the /dev/MAKEDEV program.
-It will automatically create device entries for Stallion boards. This will
-create the normal serial port devices as /dev/ttyE# where# is the port number
-starting from 0. A bank of 64 minor device numbers is allocated to each board,
-so the first port on the second board is port 64,etc. A set of callout type
-devices may also be created. They are created as the devices /dev/cue# where #
-is the same as for the ttyE devices.
-
-For the most part the Stallion driver tries to emulate the standard PC system
-COM ports and the standard Linux serial driver. The idea is that you should
-be able to use Stallion board ports and COM ports interchangeably without
-modifying anything but the device name. Anything that doesn't work like that
-should be considered a bug in this driver!
-
-If you look at the driver code you will notice that it is fairly closely
-based on the Linux serial driver (linux/drivers/char/serial.c). This is
-intentional, obviously this is the easiest way to emulate its behavior!
-
-Since this driver tries to emulate the standard serial ports as much as
-possible, most system utilities should work as they do for the standard
-COM ports. Most importantly "stty" works as expected and "setserial" can
-also be used (excepting the ability to auto-configure the I/O and IRQ
-addresses of boards). Higher baud rates are supported in the usual fashion
-through setserial or using the CBAUDEX extensions. Note that the EasyIO and
-EasyConnection (all types) support at least 57600 and 115200 baud. The newer
-EasyConnection XP modules and new EasyIO boards support 230400 and 460800
-baud as well. The older boards including ONboard and Brumby support a
-maximum baud rate of 38400.
-
-If you are unfamiliar with how to use serial ports, then get the Serial-HOWTO
-by Greg Hankins. It will explain everything you need to know!
-
-
-
-4. NOTES
-
-You can use both drivers at once if you have a mix of board types installed
-in a system. However to do this you will need to change the major numbers
-used by one of the drivers. Currently both drivers use major numbers 24, 25
-and 28 for their devices. Change one driver to use some other major numbers,
-and then modify the mkdevnods script to make device nodes based on those new
-major numbers. For example, you could change the istallion.c driver to use
-major numbers 60, 61 and 62. You will also need to create device nodes with
-different names for the ports, for example ttyF# and cuf#.
-
-The original Stallion board is no longer supported by Stallion Technologies.
-Although it is known to work with the istallion driver.
-
-Finding a free physical memory address range can be a problem. The older
-boards like the Stallion and ONboard need large areas (64K or even 128K), so
-they can be very difficult to get into a system. If you have 16 Mb of RAM
-then you have no choice but to put them somewhere in the 640K -> 1Mb range.
-ONboards require 64K, so typically 0xd0000 is good, or 0xe0000 on some
-systems. If you have an original Stallion board, "V4.0" or Rev.O, then you
-need a 64K memory address space, so again 0xd0000 and 0xe0000 are good.
-Older Stallion boards are a much bigger problem. They need 128K of address
-space and must be on a 128K boundary. If you don't have a VGA card then
-0xc0000 might be usable - there is really no other place you can put them
-below 1Mb.
-
-Both the ONboard and old Stallion boards can use higher memory addresses as
-well, but you must have less than 16Mb of RAM to be able to use them. Usual
-high memory addresses used include 0xec0000 and 0xf00000.
-
-The Brumby boards only require 16Kb of address space, so you can usually
-squeeze them in somewhere. Common addresses are 0xc8000, 0xcc000, or in
-the 0xd0000 range. EasyConnection 8/64 boards are even better, they only
-require 4Kb of address space, again usually 0xc8000, 0xcc000 or 0xd0000
-are good.
-
-If you are using an EasyConnection 8/64-EI or ONboard/E then usually the
-0xd0000 or 0xe0000 ranges are the best options below 1Mb. If neither of
-them can be used then the high memory support to use the really high address
-ranges is the best option. Typically the 2Gb range is convenient for them,
-and gets them well out of the way.
-
-The ports of the EasyIO-8M board do not have DCD or DTR signals. So these
-ports cannot be used as real modem devices. Generally, when using these
-ports you should only use the cueX devices.
-
-The driver utility package contains a couple of very useful programs. One 
-is a serial port statistics collection and display program - very handy
-for solving serial port problems. The other is an extended option setting
-program that works with the intelligent boards.
-
-
-
-5. DISCLAIMER
-
-The information contained in this document is believed to be accurate and
-reliable. However, no responsibility is assumed by Stallion Technologies
-Pty. Ltd. for its use, nor any infringements of patents or other rights
-of third parties resulting from its use. Stallion Technologies reserves
-the right to modify the design of its products and will endeavour to change
-the information in manuals and accompanying documentation accordingly.
-
index bb8b0dc532b8d5b98d7fe44d168be90a0044bef6..77d68e23b2476f05993be8f9897b76f3bf08edea 100644 (file)
@@ -29,6 +29,8 @@ ALC269/270/275/276/280/282
   alc271-dmic  Enable ALC271X digital mic workaround
   inv-dmic     Inverted internal mic workaround
   lenovo-dock   Enables docking station I/O for some Lenovos
+  dell-headset-multi   Headset jack, which can also be used as mic-in
+  dell-headset-dock    Headset jack (without mic-in), and also dock I/O
 
 ALC662/663/272
 ==============
@@ -42,6 +44,7 @@ ALC662/663/272
   asus-mode7   ASUS
   asus-mode8   ASUS
   inv-dmic     Inverted internal mic workaround
+  dell-headset-multi   Headset jack, which can also be used as mic-in
 
 ALC680
 ======
diff --git a/Documentation/usb/gadget_configfs.txt b/Documentation/usb/gadget_configfs.txt
new file mode 100644 (file)
index 0000000..8ec2a67
--- /dev/null
@@ -0,0 +1,384 @@
+
+
+
+
+               Linux USB gadget configured through configfs
+
+
+                            25th April 2013
+
+
+
+
+Overview
+========
+
+A USB Linux Gadget is a device which has a UDC (USB Device Controller) and can
+be connected to a USB Host to extend it with additional functions like a serial
+port or a mass storage capability.
+
+A gadget is seen by its host as a set of configurations, each of which contains
+a number of interfaces which, from the gadget's perspective, are known as
+functions, each function representing e.g. a serial connection or a SCSI disk.
+
+Linux provides a number of functions for gadgets to use.
+
+Creating a gadget means deciding what configurations there will be
+and which functions each configuration will provide.
+
+Configfs (please see Documentation/filesystems/configfs/*) lends itself nicely
+for the purpose of telling the kernel about the above mentioned decision.
+This document is about how to do it.
+
+It also describes how configfs integration into gadget is designed.
+
+
+
+
+Requirements
+============
+
+In order for this to work configfs must be available, so CONFIGFS_FS must be
+'y' or 'm' in .config. As of this writing USB_LIBCOMPOSITE selects CONFIGFS_FS.
+
+
+
+
+Usage
+=====
+
+(The original post describing the first function
+made available through configfs can be seen here:
+http://www.spinics.net/lists/linux-usb/msg76388.html)
+
+$ modprobe libcomposite
+$ mount none $CONFIGFS_HOME -t configfs
+
+where CONFIGFS_HOME is the mount point for configfs
+
+1. Creating the gadgets
+-----------------------
+
+For each gadget to be created its corresponding directory must be created:
+
+$ mkdir $CONFIGFS_HOME/usb_gadget/<gadget name>
+
+e.g.:
+
+$ mkdir $CONFIGFS_HOME/usb_gadget/g1
+
+...
+...
+...
+
+$ cd $CONFIGFS_HOME/usb_gadget/g1
+
+Each gadget needs to have its vendor id <VID> and product id <PID> specified:
+
+$ echo <VID> > idVendor
+$ echo <PID> > idProduct
+
+A gadget also needs its serial number, manufacturer and product strings.
+In order to have a place to store them, a strings subdirectory must be created
+for each language, e.g.:
+
+$ mkdir strings/0x409
+
+Then the strings can be specified:
+
+$ echo <serial number> > strings/0x409/serialnumber
+$ echo <manufacturer> > strings/0x409/manufacturer
+$ echo <product> > strings/0x409/product
+
+2. Creating the configurations
+------------------------------
+
+Each gadget will consist of a number of configurations; their corresponding
+directories must be created:
+
+$ mkdir configs/<name>.<number>
+
+where <name> can be any string which is legal in a filesystem and the
+<number> is the configuration's number, e.g.:
+
+$ mkdir configs/c.1
+
+...
+...
+...
+
+Each configuration also needs its strings, so a subdirectory must be created
+for each language, e.g.:
+
+$ mkdir configs/c.1/strings/0x409
+
+Then the configuration string can be specified:
+
+$ echo <configuration> > configs/c.1/strings/0x409/configuration
+
+Some attributes can also be set for a configuration, e.g.:
+
+$ echo 120 > configs/c.1/MaxPower
+
+3. Creating the functions
+-------------------------
+
+The gadget will provide some functions; for each function its corresponding
+directory must be created:
+
+$ mkdir functions/<name>.<instance name>
+
+where <name> corresponds to one of allowed function names and instance name
+is an arbitrary string allowed in a filesystem, e.g.:
+
+$ mkdir functions/ncm.usb0 # usb_f_ncm.ko gets loaded with request_module()
+
+...
+...
+...
+
+Each function provides its specific set of attributes, with either read-only
+or read-write access. Where applicable they need to be written to as
+appropriate.
+Please refer to Documentation/ABI/*/configfs-usb-gadget* for more information.
+
+4. Associating the functions with their configurations
+------------------------------------------------------
+
+At this moment a number of gadgets are created, each of which has a number of
+configurations specified and a number of functions available. What remains
+is specifying which function is available in which configuration (the same
+function can be used in multiple configurations). This is achieved by
+creating symbolic links:
+
+$ ln -s functions/<name>.<instance name> configs/<name>.<number>
+
+e.g.:
+
+$ ln -s functions/ncm.usb0 configs/c.1
+
+...
+...
+...
+
+5. Enabling the gadget
+----------------------
+
+All the above steps serve the purpose of composing the gadget of
+configurations and functions.
+
+An example directory structure might look like this:
+
+.
+./strings
+./strings/0x409
+./strings/0x409/serialnumber
+./strings/0x409/product
+./strings/0x409/manufacturer
+./configs
+./configs/c.1
+./configs/c.1/ncm.usb0 -> ../../../../usb_gadget/g1/functions/ncm.usb0
+./configs/c.1/strings
+./configs/c.1/strings/0x409
+./configs/c.1/strings/0x409/configuration
+./configs/c.1/bmAttributes
+./configs/c.1/MaxPower
+./functions
+./functions/ncm.usb0
+./functions/ncm.usb0/ifname
+./functions/ncm.usb0/qmult
+./functions/ncm.usb0/host_addr
+./functions/ncm.usb0/dev_addr
+./UDC
+./bcdUSB
+./bcdDevice
+./idProduct
+./idVendor
+./bMaxPacketSize0
+./bDeviceProtocol
+./bDeviceSubClass
+./bDeviceClass
+
+
+Such a gadget must finally be enabled so that the USB host can enumerate it.
+In order to enable the gadget it must be bound to a UDC (USB Device Controller).
+
+$ echo <udc name> > UDC
+
+where <udc name> is one of those found in /sys/class/udc/*
+e.g.:
+
+$ echo s3c-hsotg > UDC
+
+
+6. Disabling the gadget
+-----------------------
+
+$ echo "" > UDC
+
+7. Cleaning up
+--------------
+
+Remove functions from configurations:
+
+$ rm configs/<config name>.<number>/<function>
+
+where <config name>.<number> specifies the configuration and <function> is
+a symlink to a function being removed from the configuration, e.g.:
+
+$ rm configs/c.1/ncm.usb0
+
+...
+...
+...
+
+Remove strings directories in configurations
+
+$ rmdir configs/<config name>.<number>/strings/<lang>
+
+e.g.:
+
+$ rmdir configs/c.1/strings/0x409
+
+...
+...
+...
+
+and remove the configurations
+
+$ rmdir configs/<config name>.<number>
+
+e.g.:
+
+rmdir configs/c.1
+
+...
+...
+...
+
+Remove functions (function modules are not unloaded, though)
+
+$ rmdir functions/<name>.<instance name>
+
+e.g.:
+
+$ rmdir functions/ncm.usb0
+
+...
+...
+...
+
+Remove strings directories in the gadget
+
+$ rmdir strings/<lang>
+
+e.g.:
+
+$ rmdir strings/0x409
+
+and finally remove the gadget:
+
+$ cd ..
+$ rmdir <gadget name>
+
+e.g.:
+
+$ rmdir g1
+
+
+
+
+Implementation design
+=====================
+
+Below the idea of how configfs works is presented.
+In configfs there are items and groups, both represented as directories.
+The difference between an item and a group is that a group can contain
+other groups. In the picture below only an item is shown.
+Both items and groups can have attributes, which are represented as files.
+The user can create and remove directories, but cannot remove files,
+which can be read-only or read-write, depending on what they represent.
+
+The filesystem part of configfs operates on config_items/groups and
+configfs_attributes which are generic and of the same type for all
+configured elements. However, they are embedded in usage-specific
+larger structures. In the picture below there is a "cs" which contains
+a config_item and an "sa" which contains a configfs_attribute.
+
+The filesystem view would be like this:
+
+./
+./cs        (directory)
+   |
+   +--sa    (file)
+   |
+   .
+   .
+   .
+
+Whenever a user reads/writes the "sa" file, a function is called
+which accepts a struct config_item and a struct configfs_attribute.
+In the said function the "cs" and "sa" are retrieved using the well
+known container_of technique and an appropriate sa's function (show or
+store) is called and passed the "cs" and a character buffer. The "show"
+is for displaying the file's contents (copy data from the cs to the
+buffer), while the "store" is for modifying the file's contents (copy data
+from the buffer to the cs), but it is up to the implementer of the
+two functions to decide what they actually do.
+
+typedef struct configured_structure cs;
+typedef struct specific_attribute sa;
+
+                                       sa
+                       +----------------------------------+
+        cs             |  (*show)(cs *, buffer);          |
++-----------------+    |  (*store)(cs *, buffer, length); |
+|                 |    |                                  |
+| +-------------+ |    |       +------------------+       |
+| | struct      |-|----|------>|struct            |       |
+| | config_item | |    |       |configfs_attribute|       |
+| +-------------+ |    |       +------------------+       |
+|                 |    +----------------------------------+
+| data to be set  |                .
+|                 |                .
++-----------------+                .
+
+The file names are decided by the config item/group designer, while
+the directories in general can be named at will. A group can have
+a number of its default sub-groups created automatically.
+
+For more information on configfs please see
+Documentation/filesystems/configfs/*.
+
+The concepts described above translate to USB gadgets like this:
+
+1. A gadget has its config group, which has some attributes (idVendor,
+idProduct etc) and default sub-groups (configs, functions, strings).
+Writing to the attributes causes the information to be stored in
+appropriate locations. In the configs, functions and strings sub-groups
+a user can create their sub-groups to represent configurations, functions,
+and groups of strings in a given language.
+
+2. The user creates configurations and functions, and in the configurations
+creates symbolic links to functions. This information is used when the
+gadget's UDC attribute is written to, which means binding the gadget
+to the UDC. The code in drivers/usb/gadget/configfs.c iterates over
+all configurations, and in each configuration it iterates over all
+functions and binds them. This way the whole gadget is bound.
+
+3. The file drivers/usb/gadget/configfs.c contains code for
+
+       - gadget's config_group
+       - gadget's default groups (configs, functions, strings)
+       - associating functions with configurations (symlinks)
+
+4. Each USB function naturally has its own view of what it wants
+configured, so config_groups for particular functions are defined
+in the functions' implementation files drivers/usb/gadget/f_*.c.
+
+5. Each function's code is written in such a way that it uses
+usb_get_function_instance(), which, in turn, calls request_module().
+So, provided that modprobe works, modules for particular functions
+are loaded automatically. Please note that the converse is not true:
+after a gadget is disabled and torn down, the modules remain loaded.
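To make the container_of step described in the "Implementation design" section
above concrete, here is an illustrative-only sketch; the structure, field and
callback names are invented, and only config_item, configfs_attribute and the
show-callback signature are the real configfs interface of this kernel
generation:

struct configured_structure {
	struct config_item item;	/* the generic configfs part */
	int data_to_be_set;		/* the usage-specific payload */
};

static ssize_t cs_attr_show(struct config_item *item,
			    struct configfs_attribute *attr,
			    char *buf)
{
	struct configured_structure *cs =
		container_of(item, struct configured_structure, item);

	/* "show": copy data from the cs into the caller's buffer */
	return sprintf(buf, "%d\n", cs->data_to_be_set);
}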
index 93898597ab617aa08c6bfafb966c0a16aa3bc099..4f43ddf7350a2bbcecf5e884e474fdf274138d3b 100644 (file)
@@ -3220,7 +3220,7 @@ F:        lib/fault-inject.c
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
 M:     Robert Love <robert.w.love@intel.com>
-L:     devel@open-fcoe.org
+L:     fcoe-devel@open-fcoe.org
 W:     www.Open-FCoE.org
 S:     Supported
 F:     drivers/scsi/libfc/
@@ -4577,7 +4577,7 @@ F:        fs/jbd2/
 F:     include/linux/jbd2.h
 
 JSM Neo PCI based serial card
-M:     Lucas Tavares <lucaskt@linux.vnet.ibm.com>
+M:     Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 L:     linux-serial@vger.kernel.org
 S:     Maintained
 F:     drivers/tty/serial/jsm/
index c6863b55f7c7a48d53338d51d6c5a6b711cbadc1..e5e3ba0851913723cc205987f0840c9e20c5a877 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
index 81a4342d5a3f292866a992fda852bf880a14ccbc..d8f9b7e892348359837e34b558a7a28ae121530c 100644 (file)
@@ -354,9 +354,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define kern_addr_valid(addr)  (1)
 #endif
 
-#define io_remap_pfn_range(vma, start, pfn, size, prot)        \
-               remap_pfn_range(vma, start, pfn, size, prot)
-
 #define pte_ERROR(e) \
        printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
index da711e37fc975af1041b7229d02b5aea84856f40..6a61deed4a853ce8fdbb6b753070f2ca29aef08f 100644 (file)
@@ -61,7 +61,9 @@ locate_and_init_vga(void *(*sel_func)(void *, void *))
 
        /* Set the VGA hose and init the new console. */
        pci_vga_hose = hose;
-       take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
+       console_lock();
+       do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
+       console_unlock();
 }
 
 void __init
index b9e37ad6fa19ca58b762ab0337d0acd8ea6e30f3..1402fcc11c2c0443d18d59e21192f5af50ecb1c2 100644 (file)
@@ -96,6 +96,7 @@ struct osf_dirent {
 };
 
 struct osf_dirent_callback {
+       struct dir_context ctx;
        struct osf_dirent __user *dirent;
        long __user *basep;
        unsigned int count;
@@ -146,17 +147,17 @@ SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd,
 {
        int error;
        struct fd arg = fdget(fd);
-       struct osf_dirent_callback buf;
+       struct osf_dirent_callback buf = {
+               .ctx.actor = osf_filldir,
+               .dirent = dirent,
+               .basep = basep,
+               .count = count
+       };
 
        if (!arg.file)
                return -EBADF;
 
-       buf.dirent = dirent;
-       buf.basep = basep;
-       buf.count = count;
-       buf.error = 0;
-
-       error = vfs_readdir(arg.file, osf_filldir, &buf);
+       error = iterate_dir(arg.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
        if (count != buf.count)
index b51f7b4818cd07117308d645abfae4a91eeb0aa3..2b183b0d3207a3b64bb681792defeb3ca659617f 100644 (file)
@@ -26,7 +26,6 @@ static int hose_mmap_page_range(struct pci_controller *hose,
                base = sparse ? hose->sparse_io_base : hose->dense_io_base;
 
        vma->vm_pgoff += base >> PAGE_SHIFT;
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 
        return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                  vma->vm_end - vma->vm_start,
index ab80a80d38a214d57d6fa06c2f2e8e601ea0415b..f2360a74e5d5544983160d951c46bddb98819e0e 100644 (file)
@@ -117,7 +117,9 @@ common_shutdown_1(void *generic_ptr)
                if (in_interrupt())
                        irq_exit();
                /* This has the effect of resetting the VGA video origin.  */
-               take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
+               console_lock();
+               do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
+               console_unlock();
 #endif
                pci_restore_srm_config();
                set_hae(srm_hae);
index 95b1522212a73fce42cd1a7a5c18546c61232346..c110ac87d22bcc56ee2448da299f7cb48f6d6763 100644 (file)
@@ -394,9 +394,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
  * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
  */
-#define io_remap_pfn_range(vma, from, pfn, size, prot) \
-                       remap_pfn_range(vma, from, pfn, size, prot)
-
 #include <asm-generic/pgtable.h>
 
 /* to cope with aliasing VIPT cache */
index 49d993cee51232874a81814fe39ca99e09b88bad..136f263ed47b79d010cf4ca06e7b1e2a07e4fd73 100644 (file)
@@ -1087,6 +1087,20 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
 
+config PJ4B_ERRATA_4742
+       bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation"
+       depends on CPU_PJ4B && MACH_ARMADA_370
+       default y
+       help
+         When coming out of either a Wait for Interrupt (WFI) or a Wait for
+         Event (WFE) IDLE states, a specific timing sensitivity exists between
+         the retiring WFI/WFE instructions and the newly issued subsequent
+         instructions.  This sensitivity can result in a CPU hang scenario.
+         Workaround:
+         The software must insert either a Data Synchronization Barrier (DSB)
+         or Data Memory Barrier (DMB) command immediately after the WFI/WFE
+         instruction
+
 config ARM_ERRATA_326103
        bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
        depends on CPU_V6
@@ -1189,6 +1203,16 @@ config PL310_ERRATA_588369
           is not correctly implemented in PL310 as clean lines are not
           invalidated as a result of these operations.
 
+config ARM_ERRATA_643719
+       bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 643719 Cortex-A9 (prior to
+         r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
+         register returns zero when it should return one. The workaround
+         corrects this value, ensuring cache maintenance operations which use
+         it behave as intended and avoiding data corruption.
+
 config ARM_ERRATA_720789
        bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
        depends on CPU_V7
@@ -2006,7 +2030,7 @@ config XIP_PHYS_ADDR
 
 config KEXEC
        bool "Kexec system call (EXPERIMENTAL)"
-       depends on (!SMP || HOTPLUG_CPU)
+       depends on (!SMP || PM_SLEEP_SMP)
        help
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
index 79e9bdbfc491a29521939aa2747862fc491c6d6d..120b83bfde20e5b8f3d46b4f13ff4c82ef75a471 100644 (file)
@@ -116,7 +116,8 @@ targets       := vmlinux vmlinux.lds \
 
 # Make sure files are removed during clean
 extra-y       += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
-                lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
+                lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
+                hyp-stub.S
 
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
index d1650fb34c0a38e85f90b798d103a94612e7c65d..ded558bb0f3bb88219655656976134f6ac749810 100644 (file)
                };
        };
 
-       pinctrl@03680000 {
+       pinctrl@03860000 {
                gpz: gpz {
                        gpio-controller;
                        #gpio-cells = <2>;
index 0673524238a61f706c7c17a096ef3319fb095287..fc9fb3d526e25aff898f4e226284e7204413d2ac 100644 (file)
                interrupts = <0 50 0>;
        };
 
-       pinctrl_3: pinctrl@03680000 {
+       pinctrl_3: pinctrl@03860000 {
                compatible = "samsung,exynos5250-pinctrl";
-               reg = <0x0368000 0x1000>;
+               reg = <0x03860000 0x1000>;
                interrupts = <0 47 0>;
        };
 
index a573b94b7c93eafb313e42bf8dfde31e9ecfcd6a..c12af78e479cf3f7d69bfbcc21812287e549ac42 100644 (file)
 
        usb@c5004000 {
                status = "okay";
-               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+               nvidia,phy-reset-gpio = <&gpio 169 1>; /* gpio PV1, active low */
+       };
+
+       usb-phy@c5004000 {
+               nvidia,phy-reset-gpio = <&gpio 169 1>; /* gpio PV1, active low */
        };
 
        sdhci@c8000600 {
index e7d5de4e00b99e57628298cd5c3d4c3fb4fb67fd..ec5293758753a8e04e42d6159576e045b3d57a90 100644 (file)
                status = "okay";
        };
 
+       usb-phy@c5000000 {
+               status = "okay";
+       };
+
        usb@c5004000 {
                status = "okay";
-               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+               nvidia,phy-reset-gpio = <&gpio 169 1>; /* gpio PV1, active low */
+       };
+
+       usb-phy@c5004000 {
+               status = "okay";
+               nvidia,phy-reset-gpio = <&gpio 169 1>; /* gpio PV1, active low */
        };
 
        usb@c5008000 {
                status = "okay";
        };
 
-       usb-phy@c5004400 {
-               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+       usb-phy@c5008000 {
+               status = "okay";
        };
 
        sdhci@c8000200 {
index 52f1103907d786336716a23d7a9d1c57082e1b65..9f64f708688197129fbbe7f2f3d8128726eab8a1 100644 (file)
 
        usb@c5000000 {
                status = "okay";
-               dr_mode = "otg";
+       };
+
+       usb-phy@c5000000 {
+               status = "okay";
        };
 
        usb@c5008000 {
                status = "okay";
        };
 
+       usb-phy@c5008000 {
+               status = "okay";
+       };
+
        serial@70006000 {
                status = "okay";
        };
index e3e0c9977df451a778006bc71d75d3aec9022f93..1c17ffaff1ad8aaa5626928bcc75f4c79802bd25 100644 (file)
                status = "okay";
        };
 
+       usb-phy@c5000000 {
+               status = "okay";
+       };
+
        usb@c5004000 {
                status = "okay";
-               nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
+               nvidia,phy-reset-gpio = <&gpio 168 1>; /* gpio PV0, active low */
+       };
+
+       usb-phy@c5004000 {
+               status = "okay";
+               nvidia,phy-reset-gpio = <&gpio 168 1>; /* gpio PV0, active low */
        };
 
        usb@c5008000 {
                status = "okay";
        };
 
-       usb-phy@c5004400 {
-               nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
+       usb-phy@c5008000 {
+               status = "okay";
        };
 
        sdhci@c8000000 {
index cee4c34010fed6fa1d1ce96a1c45bebccf97a974..009dafecf88b904ee95dc304a4885bffd92ff1e5 100644 (file)
                dr_mode = "otg";
        };
 
+       usb-phy@c5000000 {
+               status = "okay";
+               vbus-supply = <&vbus_reg>;
+               dr_mode = "otg";
+       };
+
        usb@c5004000 {
                status = "okay";
-               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+               nvidia,phy-reset-gpio = <&gpio 169 1>; /* gpio PV1, active low */
+       };
+
+       usb-phy@c5004000 {
+               status = "okay";
+               nvidia,phy-reset-gpio = <&gpio 169 1>; /* gpio PV1, active low */
        };
 
        usb@c5008000 {
                status = "okay";
        };
 
-       usb-phy@c5004400 {
-               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+       usb-phy@c5008000 {
+               status = "okay";
        };
 
        sdhci@c8000000 {
                        gpio = <&pmic 1 0>;
                        enable-active-high;
                };
+
+               vbus_reg: regulator@3 {
+                       compatible = "regulator-fixed";
+                       reg = <3>;
+                       regulator-name = "vdd_vbus_wup1";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       gpio = <&gpio 24 0>; /* PD0 */
+               };
        };
 
        sound {
index 50b3ec16b93aa20e6a70f42fe0e0825657e93eb4..fc2f7d6e70b23fef9d167e3541ba21cbe52d176f 100644 (file)
                status = "okay";
        };
 
+       usb-phy@c5008000 {
+               status = "okay";
+       };
+
        sdhci@c8000600 {
                cd-gpios = <&gpio 58 1>; /* gpio PH2 */
                wp-gpios = <&gpio 59 0>; /* gpio PH3 */
index 9cc78a15d739860393edb0b227aba930444cb59f..0e65c00ec732415c2902112db584fa566584e87d 100644 (file)
                nvidia,vbus-gpio = <&gpio 170 0>; /* gpio PV2 */
        };
 
+       usb-phy@c5000000 {
+               status = "okay";
+               vbus-supply = <&vbus_reg>;
+       };
+
        usb@c5004000 {
                status = "okay";
-               nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
+               nvidia,phy-reset-gpio = <&gpio 168 1>; /* gpio PV0, active low */
+       };
+
+       usb-phy@c5004000 {
+               status = "okay";
+               nvidia,phy-reset-gpio = <&gpio 168 1>; /* gpio PV0, active low */
        };
 
        usb@c5008000 {
                status = "okay";
        };
 
-       usb-phy@c5004400 {
-               nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
+       usb-phy@c5008000 {
+               status = "okay";
        };
 
        sdhci@c8000000 {
                        regulator-max-microvolt = <1800000>;
                        regulator-always-on;
                };
+
+               vbus_reg: regulator@2 {
+                       compatible = "regulator-fixed";
+                       reg = <2>;
+                       regulator-name = "usb1_vbus";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       gpio = <&gpio 170 0>; /* PV2 */
+               };
        };
 
        sound {
index dd38f1f038347e6d328f1af6b0757d7fa03b9c36..e00f89e645f9112f50e3294e7c2d21489032000f 100644 (file)
                status = "okay";
        };
 
+       usb-phy@c5000000 {
+               status = "okay";
+       };
+
        usb@c5004000 {
                status = "okay";
-               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+               nvidia,phy-reset-gpio = <&gpio 169 1>; /* gpio PV1, active low */
+       };
+
+       usb-phy@c5004000 {
+               status = "okay";
+               nvidia,phy-reset-gpio = <&gpio 169 1>; /* gpio PV1, active low */
        };
 
        usb@c5008000 {
                status = "okay";
        };
 
-       usb-phy@c5004400 {
-               nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+       usb-phy@c5008000 {
+               status = "okay";
        };
 
        sdhci@c8000000 {
index d2567f83aaffd19584e45c8d080a560e74d1172d..3c24c9b92b440f6f1baef64b2c5874ca57b95f69 100644 (file)
                nvidia,vbus-gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
        };
 
+       usb-phy@c5000000 {
+               status = "okay";
+               vbus-supply = <&vbus1_reg>;
+       };
+
        usb@c5008000 {
                status = "okay";
                nvidia,vbus-gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
        };
 
+       usb-phy@c5008000 {
+               status = "okay";
+               vbus-supply = <&vbus3_reg>;
+       };
+
        sdhci@c8000400 {
                status = "okay";
                cd-gpios = <&gpio 69 1>; /* gpio PI5 */
                        regulator-max-microvolt = <5000000>;
                        regulator-always-on;
                };
+
+               vbus1_reg: regulator@2 {
+                       compatible = "regulator-fixed";
+                       reg = <2>;
+                       regulator-name = "vbus1";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+               };
+
+               vbus3_reg: regulator@3 {
+                       compatible = "regulator-fixed";
+                       reg = <3>;
+                       regulator-name = "vbus3";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+               };
        };
 
        sound {
index 56a91106041b31ca91a6ef9559e11abcc8f2cc1e..96d6d8a3aa7208751a3be3d7b4e4fb93a1eeba3e 100644 (file)
                status = "disabled";
        };
 
-       phy1: usb-phy@c5000400 {
+       phy1: usb-phy@c5000000 {
                compatible = "nvidia,tegra20-usb-phy";
-               reg = <0xc5000400 0x3c00>;
+               reg = <0xc5000000 0x4000 0xc5000000 0x4000>;
                phy_type = "utmi";
+               clocks = <&tegra_car 22>,
+                        <&tegra_car 127>,
+                        <&tegra_car 106>,
+                        <&tegra_car 22>;
+               clock-names = "reg", "pll_u", "timer", "utmi-pads";
                nvidia,has-legacy-mode;
-               clocks = <&tegra_car 22>, <&tegra_car 127>;
-               clock-names = "phy", "pll_u";
+               hssync_start_delay = <9>;
+               idle_wait_delay = <17>;
+               elastic_limit = <16>;
+               term_range_adj = <6>;
+               xcvr_setup = <9>;
+               xcvr_lsfslew = <1>;
+               xcvr_lsrslew = <1>;
+               status = "disabled";
        };
 
        usb@c5004000 {
                status = "disabled";
        };
 
-       phy2: usb-phy@c5004400 {
+       phy2: usb-phy@c5004000 {
                compatible = "nvidia,tegra20-usb-phy";
-               reg = <0xc5004400 0x3c00>;
+               reg = <0xc5004000 0x4000>;
                phy_type = "ulpi";
-               clocks = <&tegra_car 93>, <&tegra_car 127>;
-               clock-names = "phy", "pll_u";
+               clocks = <&tegra_car 58>,
+                        <&tegra_car 127>,
+                        <&tegra_car 93>;
+               clock-names = "reg", "pll_u", "ulpi-link";
+               status = "disabled";
        };
 
        usb@c5008000 {
                status = "disabled";
        };
 
-       phy3: usb-phy@c5008400 {
+       phy3: usb-phy@c5008000 {
                compatible = "nvidia,tegra20-usb-phy";
-               reg = <0xc5008400 0x3c00>;
+               reg = <0xc5008000 0x4000 0xc5000000 0x4000>;
                phy_type = "utmi";
-               clocks = <&tegra_car 22>, <&tegra_car 127>;
-               clock-names = "phy", "pll_u";
+               clocks = <&tegra_car 59>,
+                        <&tegra_car 127>,
+                        <&tegra_car 106>,
+                        <&tegra_car 22>;
+               clock-names = "reg", "pll_u", "timer", "utmi-pads";
+               hssync_start_delay = <9>;
+               idle_wait_delay = <17>;
+               elastic_limit = <16>;
+               term_range_adj = <6>;
+               xcvr_setup = <9>;
+               xcvr_lsfslew = <2>;
+               xcvr_lsrslew = <2>;
+               status = "disabled";
        };
 
        sdhci@c8000000 {
index bff71388e72a163c4c18c93fc745f72db9b29e00..17d0ae8672fa666b8d1e43ae40b0226e1bd04ac8 100644 (file)
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
        spin_lock_irq(&(mapping)->tree_lock)
index 7652712d1d149ea07a8e5052746391014124ffdf..dba62cb1ad080f4a84202b68b2c53b873c68394c 100644 (file)
@@ -32,6 +32,8 @@
 
 #define MPIDR_HWID_BITMASK 0xFFFFFF
 
+#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
+
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
 
index ac1dd54724b6a073415cc6a6704284c81556d6da..8017e94acc5e0883082212e9714e68cebf40f52b 100644 (file)
 # endif
 #endif
 
+#ifdef CONFIG_CPU_PJ4B
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_pj4b
+# endif
+#endif
+
 #ifndef MULTI_CPU
 #define cpu_proc_init                  __glue(CPU_NAME,_proc_init)
 #define cpu_proc_fin                   __glue(CPU_NAME,_proc_fin)
index 7ec60d6075bf47f94fbec24b3f5dd64014fd643b..0642228ff78562cd484a116ab8627023f45aaee4 100644 (file)
@@ -79,8 +79,6 @@ extern unsigned int kobjsize(const void *objp);
  * No page table caches to initialise.
  */
 #define pgtable_cache_init()   do { } while (0)
-#define io_remap_pfn_range     remap_pfn_range
-
 
 /*
  * All 32bit addresses are effectively valid for vmalloc...
index 9bcd262a900842dfc685991511d4850fb3601ff5..229e0dde9c710d659eec8830fb1457dc53c39ad2 100644 (file)
@@ -318,13 +318,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-               remap_pfn_range(vma, from, pfn, size, prot)
-
 #define pgtable_cache_init() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
index aaa61b6f50fff28e77ded32b66698feb0b72db6e..e789832027374278b7c28cecf0f9562924c6e877 100644 (file)
@@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void)
 /*
  * Logical CPU mapping.
  */
-extern int __cpu_logical_map[];
+extern u32 __cpu_logical_map[];
 #define cpu_logical_map(cpu)   __cpu_logical_map[cpu]
 /*
  * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
index 5af04f6daa33804ac8a3d03b7765e7bae4a86106..5859c8bc727c4254bc7e8fa254a4271d6b214242 100644 (file)
@@ -82,7 +82,7 @@ void __init arm_dt_init_cpu_maps(void)
        u32 i, j, cpuidx = 1;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
 
-       u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
+       u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
        bool bootcpu_valid = false;
        cpus = of_find_node_by_path("/cpus");
 
@@ -92,6 +92,9 @@ void __init arm_dt_init_cpu_maps(void)
        for_each_child_of_node(cpus, cpu) {
                u32 hwid;
 
+               if (of_node_cmp(cpu->type, "cpu"))
+                       continue;
+
                pr_debug(" * %s...\n", cpu->full_name);
                /*
                 * A device tree containing CPU nodes with missing "reg"
@@ -149,9 +152,10 @@ void __init arm_dt_init_cpu_maps(void)
                tmp_map[i] = hwid;
        }
 
-       if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], "
-                                "fall back to default cpu_logical_map\n"))
+       if (!bootcpu_valid) {
+               pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n");
                return;
+       }
 
        /*
         * Since the boot CPU node contains proper data, and all nodes have
index 8ef8c9337809cbe5ccb91b33525c38a379e9e986..4fb074c446bf901df288b3878341a169dacb843b 100644 (file)
@@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image)
        unsigned long reboot_code_buffer_phys;
        void *reboot_code_buffer;
 
+       if (num_online_cpus() > 1) {
+               pr_err("kexec: error: multiple CPUs still online\n");
+               return;
+       }
 
        page_list = image->head & PAGE_MASK;
 
index 282de4826abb640bd310ce8cf6099dec297803ce..6e8931ccf13ed048f0b93880476a3911b3a9b587 100644 (file)
@@ -184,30 +184,61 @@ int __init reboot_setup(char *str)
 
 __setup("reboot=", reboot_setup);
 
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() is used to achieve this.
+ */
 void machine_shutdown(void)
 {
-#ifdef CONFIG_SMP
-       smp_send_stop();
-#endif
+       disable_nonboot_cpus();
 }
 
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
 void machine_halt(void)
 {
-       machine_shutdown();
+       smp_send_stop();
+
        local_irq_disable();
        while (1);
 }
 
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
 void machine_power_off(void)
 {
-       machine_shutdown();
+       smp_send_stop();
+
        if (pm_power_off)
                pm_power_off();
 }
 
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
 void machine_restart(char *cmd)
 {
-       machine_shutdown();
+       smp_send_stop();
 
        arm_pm_restart(reboot_mode, cmd);
 
index 1522c7ae31b0c239901237569bad5cbb4ec7b02e..b4b1d397592b3d6435c0a503541c37d3fffcc016 100644 (file)
@@ -444,7 +444,7 @@ void notrace cpu_init(void)
            : "r14");
 }
 
-int __cpu_logical_map[NR_CPUS];
+u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
 
 void __init smp_setup_processor_id(void)
 {
index 550d63cef68e4be6ada87a3c7942ccd767061c6d..5919eb451bb9840590091c3d26a922bd65b1d005 100644 (file)
@@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static void smp_kill_cpus(cpumask_t *mask)
-{
-       unsigned int cpu;
-       for_each_cpu(cpu, mask)
-               platform_cpu_kill(cpu);
-}
-#else
-static void smp_kill_cpus(cpumask_t *mask) { }
-#endif
-
 void smp_send_stop(void)
 {
        unsigned long timeout;
@@ -679,8 +668,6 @@ void smp_send_stop(void)
 
        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");
-
-       smp_kill_cpus(&mask);
 }
 
 /*
index 15451ee4acc8f61f1ebd2f20164290c28c2b6a12..515b00064da8f66db5400c3990905f7ad89e2113 100644 (file)
@@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis)
        mrc     p15, 1, r0, c0, c0, 1           @ read clidr, r0 = clidr
        ALT_SMP(ands    r3, r0, #(7 << 21))     @ extract LoUIS from clidr
        ALT_UP(ands     r3, r0, #(7 << 27))     @ extract LoUU from clidr
+#ifdef CONFIG_ARM_ERRATA_643719
+       ALT_SMP(mrceq   p15, 0, r2, c0, c0, 0)  @ read main ID register
+       ALT_UP(moveq    pc, lr)                 @ LoUU is zero, so nothing to do
+       ldreq   r1, =0x410fc090                 @ ID of ARM Cortex A9 r0p?
+       biceq   r2, r2, #0x0000000f             @ clear minor revision number
+       teqeq   r2, r1                          @ test for errata affected core and if so...
+       orreqs  r3, #(1 << 21)                  @   fix LoUIS value (and set flags state to 'ne')
+#endif
        ALT_SMP(mov     r3, r3, lsr #20)        @ r3 = LoUIS * 2
        ALT_UP(mov      r3, r3, lsr #26)        @ r3 = LoUU * 2
        moveq   pc, lr                          @ return if level == 0
index 0d473cce501c137e8c87c40f9ccb455d6ec275eb..32aa5861119f2bdd353468114462dd8a680a60cb 100644 (file)
@@ -300,6 +300,39 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+/*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation.  Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+       if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+               struct address_space *mapping;
+
+               mapping = page_mapping(page);
+
+               if (!mapping || mapping_mapped(mapping)) {
+                       void *addr;
+
+                       addr = page_address(page);
+                       /*
+                        * kmap_atomic() doesn't set the page virtual
+                        * address for highmem pages, and
+                        * kunmap_atomic() takes care of cache
+                        * flushing already.
+                        */
+                       if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+                               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+               }
+       }
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 /*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data.  The expected sequence is:
index e0d8565671a6c8104c643937098b963ac2f517ee..4d409e6a552df67f11ea8bb81b1930e830c3dcde 100644 (file)
@@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
                        unsigned long end, phys_addr_t phys,
                        const struct mem_type *type)
 {
+       pmd_t *p = pmd;
+
 #ifndef CONFIG_ARM_LPAE
        /*
         * In classic MMU format, puds and pmds are folded in to
@@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr,
                phys += SECTION_SIZE;
        } while (pmd++, addr += SECTION_SIZE, addr != end);
 
-       flush_pmd_entry(pmd);
+       flush_pmd_entry(p);
 }
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
@@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
                 */
                if (type->prot_sect &&
                                ((addr | next | phys) & ~SECTION_MASK) == 0) {
-                       map_init_section(pmd, addr, next, phys, type);
+                       __map_init_section(pmd, addr, next, phys, type);
                } else {
                        alloc_init_pte(pmd, addr, next,
                                                __phys_to_pfn(phys), type);
index d51225f90ae2d5d3af909ec87d29910bea4f23d5..eb5293a69a8440c43018ba3732b60e1cec96a710 100644 (file)
@@ -57,6 +57,12 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+void flush_kernel_dcache_page(struct page *page)
+{
+       __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
index d217e9795d74b0878edb666d0a73ac72b1d586cc..aaeb6c127c7aa5766e9d1093477bc65e540bdca8 100644 (file)
@@ -81,7 +81,6 @@ ENDPROC(cpu_fa526_reset)
  */
        .align  4
 ENTRY(cpu_fa526_do_idle)
-       mcr     p15, 0, r0, c7, c0, 4           @ Wait for interrupt
        mov     pc, lr
 
 
index f9a0aa725ea980f1fafb4619d97d4c014c6b6b70..e3c48a3fe0638177f980ead520a2190f089128f2 100644 (file)
@@ -333,3 +333,8 @@ ENTRY(\name\()_tlb_fns)
        .endif
        .size   \name\()_tlb_fns, . - \name\()_tlb_fns
 .endm
+
+.macro globl_equ x, y
+       .globl  \x
+       .equ    \x, \y
+.endm
index 2c73a7301ff7017eea046df2f2b67e475ab653de..e35fec34453ea13d57e2f1902a07bc99e21ba731 100644 (file)
@@ -138,6 +138,29 @@ ENTRY(cpu_v7_do_resume)
        mov     r0, r8                  @ control register
        b       cpu_resume_mmu
 ENDPROC(cpu_v7_do_resume)
+#endif
+
+#ifdef CONFIG_CPU_PJ4B
+       globl_equ       cpu_pj4b_switch_mm,     cpu_v7_switch_mm
+       globl_equ       cpu_pj4b_set_pte_ext,   cpu_v7_set_pte_ext
+       globl_equ       cpu_pj4b_proc_init,     cpu_v7_proc_init
+       globl_equ       cpu_pj4b_proc_fin,      cpu_v7_proc_fin
+       globl_equ       cpu_pj4b_reset,         cpu_v7_reset
+#ifdef CONFIG_PJ4B_ERRATA_4742
+ENTRY(cpu_pj4b_do_idle)
+       dsb                                     @ WFI may enter a low-power mode
+       wfi
+       dsb                                     @barrier
+       mov     pc, lr
+ENDPROC(cpu_pj4b_do_idle)
+#else
+       globl_equ       cpu_pj4b_do_idle,       cpu_v7_do_idle
+#endif
+       globl_equ       cpu_pj4b_dcache_clean_area,     cpu_v7_dcache_clean_area
+       globl_equ       cpu_pj4b_do_suspend,    cpu_v7_do_suspend
+       globl_equ       cpu_pj4b_do_resume,     cpu_v7_do_resume
+       globl_equ       cpu_pj4b_suspend_size,  cpu_v7_suspend_size
+
 #endif
 
        __CPUINIT
@@ -350,6 +373,9 @@ __v7_setup_stack:
 
        @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
        define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#ifdef CONFIG_CPU_PJ4B
+       define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#endif
 
        .section ".rodata"
 
@@ -362,7 +388,7 @@ __v7_setup_stack:
        /*
         * Standard v7 proc info content
         */
-.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
+.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
        ALT_SMP(.long   PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
                        PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
        ALT_UP(.long    PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
@@ -375,7 +401,7 @@ __v7_setup_stack:
        .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
                HWCAP_EDSP | HWCAP_TLS | \hwcaps
        .long   cpu_v7_name
-       .long   v7_processor_functions
+       .long   \proc_fns
        .long   v7wbi_tlb_fns
        .long   v6_user_fns
        .long   v7_cache_fns
@@ -407,12 +433,14 @@ __v7_ca9mp_proc_info:
        /*
         * Marvell PJ4B processor.
         */
+#ifdef CONFIG_CPU_PJ4B
        .type   __v7_pj4b_proc_info, #object
 __v7_pj4b_proc_info:
-       .long   0x562f5840
-       .long   0xfffffff0
-       __v7_proc __v7_pj4b_setup
+       .long   0x560f5800
+       .long   0xff0fff00
+       __v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
        .size   __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
+#endif
 
        /*
         * ARM Ltd. Cortex A7 processor.
index e333a243bfccf4f6e1547ea2543e5a3fa3abd32b..3a768e96cf0ef18bf874d83ffa3ba096eaa9bda0 100644 (file)
@@ -320,13 +320,6 @@ extern int kern_addr_valid(unsigned long addr);
 
 #include <asm-generic/pgtable.h>
 
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-               remap_pfn_range(vma, from, pfn, size, prot)
-
 #define pgtable_cache_init() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
index 1e49e5eb81e977fe44fccc369be1974617680cac..9ba33c40cdf8f841e974f68e599f0f97e87138ff 100644 (file)
@@ -1336,6 +1336,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
                return;
        }
 
+       perf_callchain_store(entry, regs->pc);
        tail = (struct frame_tail __user *)regs->regs[29];
 
        while (entry->nr < PERF_MAX_STACK_DEPTH &&
index 6fbfea61f7bb7315d7c19f15db0df6ae8f8bd697..4beff97e2033f14efc1094b979a9f887a8525259 100644 (file)
@@ -362,9 +362,6 @@ typedef pte_t *pte_addr_t;
 
 #define kern_addr_valid(addr)  (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /* No page table caches to initialize (?) */
 #define pgtable_cache_init()   do { } while(0)
 
index b8663921d3c10520f5ad68a70ae57980f5063ac3..0b049019eba70335dd3a380163979cf6f5614511 100644 (file)
@@ -88,7 +88,6 @@ extern char empty_zero_page[];
  * No page table caches to initialise.
  */
 #define pgtable_cache_init()   do { } while (0)
-#define io_remap_pfn_range      remap_pfn_range
 
 /*
  * All 32bit addresses are effectively valid for vmalloc...
index 38a4312eb2cb32482ff82b172e2a37136597bff2..c0eed5b18860fa4c271e0dd6fe1887d1ed039f35 100644 (file)
@@ -71,7 +71,6 @@ extern unsigned long empty_zero_page;
  * No page table caches to initialise
  */
 #define pgtable_cache_init()   do { } while (0)
-#define io_remap_pfn_range      remap_pfn_range
 
 #include <asm-generic/pgtable.h>
 
index 7df4301383558410a18def3788b743d4d523251d..8b8c86793225fd1f1df33a334cf1ec41bd581d78 100644 (file)
@@ -258,9 +258,6 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre
 #define pgd_ERROR(e) \
         printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)         \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
 
index 6bc241e4b4f8212b386758586693d58e32e3a426..eb0110acd19b72331c040fd5f9e510ce60c2109e 100644 (file)
@@ -488,9 +488,6 @@ static inline int pte_file(pte_t pte)
 #define PageSkip(page)         (0)
 #define kern_addr_valid(addr)  (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
index 62ef17676b406274a76ddf5d3a51f7091a9ace97..7ca20f894dd794b33e2c0735eed92bd8329c9a27 100644 (file)
@@ -52,9 +52,6 @@ extern int is_in_rom(unsigned long);
  */
 #define pgtable_cache_init()   do { } while (0)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
index 20d55f69fe5571cd6a32a1515dbf1db7eb70db61..d8bd54fa431ef91096e84d296c47155f26a44ef0 100644 (file)
@@ -452,10 +452,6 @@ static inline int pte_exec(pte_t pte)
 
 #define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
-/* Nothing special about IO remapping at this point */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*  I think this is in case we have page table caches; needed by init/main.c  */
 #define pgtable_cache_init()    do { } while (0)
 
index 1bf2cf2f4ab45fb6f965ec0708494cd515348593..cec6c06b52c0fba49a53bb1ee7a925520bf27b9d 100644 (file)
@@ -11,6 +11,7 @@
 #define _ASM_IA64_IRQFLAGS_H
 
 #include <asm/pal.h>
+#include <asm/kregs.h>
 
 #ifdef CONFIG_IA64_DEBUG_IRQ
 extern unsigned long last_cli_ip;
index 815810cbbedccdb4a3043e186e67904a27c9c471..7935115398a6679ec560a4f7ef08d2cc95724988 100644 (file)
@@ -493,9 +493,6 @@ extern void paging_init (void);
 #define pte_to_pgoff(pte)              ((pte_val(pte) << 1) >> 3)
 #define pgoff_to_pte(off)              ((pte_t) { ((off) << 2) | _PAGE_FILE })
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
index 8a28cfea27297694aa12328e579dd6f44a385d3e..103ce6710f0724b2eb29bb8157f537d3910b3b45 100644 (file)
@@ -347,9 +347,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)  (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
index dc35e0e106e4b1d9c3aa817248f7a642796891af..9f5abbda1ea72f89b0b0c6663860aab7604e11cc 100644 (file)
@@ -135,9 +135,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #define kern_addr_valid(addr)  (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /* MMU-specific headers */
 
 #ifdef CONFIG_SUN3
index 037028f4ab7033c55557ade43c6a959c89f9a0b5..c527fc2ecf82fc8fe7df959b2be401a236b3099e 100644 (file)
@@ -55,9 +55,6 @@ extern unsigned int kobjsize(const void *objp);
  */
 #define pgtable_cache_init()   do { } while (0)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
index f545477e61f3486d2f87b9a9b169993a324c8bed..471f481e67f3ebb07b34364890dc9a3feb56b013 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_METAG_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
index 1cd13d5951981dd9d0e336fb3a519dd4b9c0ac54..0d9dc5487296864c855ffe8069eaa3920696d9b1 100644 (file)
@@ -333,9 +333,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #define kern_addr_valid(addr)  (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * No page table caches to initialise
  */
index a7311cd9dee0e34841d215f505ca151c2864204c..95cef0b5f836627b500bb6d03e5730c13c3314ff 100644 (file)
@@ -13,9 +13,6 @@
 
 #include <asm/setup.h>
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #ifndef __ASSEMBLY__
 extern int mem_init_done;
 #endif
index 8b8f6b39336350b8c570b30d990f4b16332e29db..008324d1c2612a75475d7333b5e960718b042bae 100644 (file)
@@ -394,9 +394,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
        phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
        return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
 }
-#else
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range io_remap_pfn_range
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
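
This hunk, and the many one-line io_remap_pfn_range() deletions elsewhere in this merge, are part of a consolidation: architectures whose io_remap_pfn_range() is plain remap_pfn_range() drop their copy and pick up a common default, while architectures with a real implementation (as here, with fixup_bigphys_addr()) keep theirs and announce it with the define-to-itself idiom. The shared default presumably reduces to something like this sketch:

        #ifndef io_remap_pfn_range
        #define io_remap_pfn_range(vma, addr, pfn, size, prot) \
                remap_pfn_range(vma, addr, pfn, size, prot)
        #endif
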
index e2e69e1e9fe14fac77226b2f857db6d1b09b054e..44dd5aa2e36f2d44fdc78ef9baca79234c80d510 100644 (file)
@@ -257,7 +257,9 @@ static int __init bcm1480_pcibios_init(void)
        register_pci_controller(&bcm1480_controller);
 
 #ifdef CONFIG_VGA_CONSOLE
-       take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
+       console_lock();
+       do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
+       console_unlock();
 #endif
        return 0;
 }
index cdefcc4cb8d448c36b3bd4a52f448e3288ae32f9..fc634aeda4a576fd6735cda7cd2779834bb5c881 100644 (file)
@@ -283,7 +283,9 @@ static int __init sb1250_pcibios_init(void)
        register_pci_controller(&sb1250_controller);
 
 #ifdef CONFIG_VGA_CONSOLE
-       take_over_console(&vga_con, 0, MAX_NR_CONSOLES - 1, 1);
+       console_lock();
+       do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES - 1, 1);
+       console_unlock();
 #endif
        return 0;
 }
index 678f68d5f37bb7ef819ab900302ff9e14c61705d..8730c0a3c37d222630da2eba69525a084da59c70 100644 (file)
@@ -13,9 +13,8 @@
 #define _ASM_IRQFLAGS_H
 
 #include <asm/cpu-regs.h>
-#ifndef __ASSEMBLY__
-#include <linux/smp.h>
-#endif
+/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
+#include <asm/smp.h>
 
 /*
  * interrupt control
index a1e894b5f65b9bab8e51d99010e80172c7696dd3..2ddaa67e79834a6da2286377f8e66f304ad684e3 100644 (file)
@@ -486,9 +486,6 @@ extern void update_mmu_cache(struct vm_area_struct *vma,
 
 #define kern_addr_valid(addr)  (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-       remap_pfn_range((vma), (vaddr), (pfn), (size), (prot))
-
 #define MK_IOSPACE_PFN(space, pfn)     (pfn)
 #define GET_IOSPACE(pfn)               0
 #define GET_PFN(pfn)                   (pfn)
index 6745dbe649441d906cc76de00df7c496961ba1d7..56c42417d428777ef3f928136dcd8b9d60df9b84 100644 (file)
@@ -24,6 +24,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/threads.h>
 #include <linux/cpumask.h>
+#include <linux/thread_info.h>
 #endif
 
 #ifdef CONFIG_SMP
@@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map;
 extern void smp_init_cpus(void);
 extern void smp_cache_interrupt(void);
 extern void send_IPI_allbutself(int irq);
-extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu);
 #ifndef __ASSEMBLY__
 
 static inline void smp_init_cpus(void) {}
+#define raw_smp_processor_id() 0
 
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_SMP */
index 780560b330d9ef9109a76b0210185f3bc178ca8e..d7966e0f76988cfb2d36d0b480277961dcef7e3a 100644 (file)
@@ -161,7 +161,7 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user_check(x, ptr, size)                                 \
 ({                                                                     \
-       const __typeof__(ptr) __guc_ptr = (ptr);                        \
+       const __typeof__(*(ptr))* __guc_ptr = (ptr);                    \
        int _e;                                                         \
        if (likely(__access_ok((unsigned long) __guc_ptr, (size))))     \
                _e = __get_user_nocheck((x), __guc_ptr, (size));        \
index 33c3bd1e5c6db1b03eef5b354495bc7a628541c5..ebac9c11f7966cbb6a52ebeefa7bb17579d727d6 100644 (file)
@@ -38,6 +38,7 @@ struct mn10300_cpuinfo boot_cpu_data;
 /* For PCI or other memory-mapped resources */
 unsigned long pci_mem_start = 0x18000000;
 
+static char __initdata cmd_line[COMMAND_LINE_SIZE];
 char redboot_command_line[COMMAND_LINE_SIZE] =
        "console=ttyS0,115200 root=/dev/mtdblock3 rw";
 
@@ -74,45 +75,19 @@ static const char *const mn10300_cputypes[] = {
 };
 
 /*
- *
+ * Pick out the memory size.  We look for mem=size,
+ * where size is "size[KkMm]"
  */
-static void __init parse_mem_cmdline(char **cmdline_p)
+static int __init early_mem(char *p)
 {
-       char *from, *to, c;
-
-       /* save unparsed command line copy for /proc/cmdline */
-       strcpy(boot_command_line, redboot_command_line);
-
-       /* see if there's an explicit memory size option */
-       from = redboot_command_line;
-       to = redboot_command_line;
-       c = ' ';
-
-       for (;;) {
-               if (c == ' ' && !memcmp(from, "mem=", 4)) {
-                       if (to != redboot_command_line)
-                               to--;
-                       memory_size = memparse(from + 4, &from);
-               }
-
-               c = *(from++);
-               if (!c)
-                       break;
-
-               *(to++) = c;
-       }
-
-       *to = '\0';
-       *cmdline_p = redboot_command_line;
+       memory_size = memparse(p, &p);
 
        if (memory_size == 0)
                panic("Memory size not known\n");
 
-       memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
-               memory_size;
-       if (memory_end > phys_memory_end)
-               memory_end = phys_memory_end;
+       return 0;
 }
+early_param("mem", early_mem);
 
 /*
  * architecture specific setup
@@ -125,7 +100,20 @@ void __init setup_arch(char **cmdline_p)
        cpu_init();
        unit_setup();
        smp_init_cpus();
-       parse_mem_cmdline(cmdline_p);
+
+       /* save unparsed command line copy for /proc/cmdline */
+       strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE);
+
+       /* populate cmd_line too for later use, preserving boot_command_line */
+       strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+       *cmdline_p = cmd_line;
+
+       parse_early_param();
+
+       memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
+               memory_size;
+       if (memory_end > phys_memory_end)
+               memory_end = phys_memory_end;
 
        init_mm.start_code = (unsigned long)&_text;
        init_mm.end_code = (unsigned long) &_etext;
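
The rewrite above swaps a hand-rolled scan of the command line for the generic early-parameter machinery: parse_early_param() walks boot_command_line and invokes any matching early_param() hooks before the rest of setup runs. As a general illustration (the option name and variable below are made up, not from this patch):

        static unsigned long foo_bytes __initdata;

        static int __init early_foo(char *p)
        {
                foo_bytes = memparse(p, &p);    /* accepts "16M", "1G", ... */
                return 0;
        }
        early_param("foo", early_foo);
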
index c4e2e79281e8de6fa3bf370032578a77fa337f9b..febb9cd83177177e4c0edffa6d4c2d34b4a2b2a0 100644 (file)
@@ -221,7 +221,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
        /* Leave vm_pgoff as-is, the PCI space address is the physical
         * address on this platform.
         */
-       vma->vm_flags |= VM_LOCKED | VM_IO;
+       vma->vm_flags |= VM_LOCKED;
 
        prot = pgprot_val(vma->vm_page_prot);
        prot &= ~_PAGE_CACHE;
index 14c900cfd30a2f90e26e8a6ff5bf08097eacd5b1..37bf6a3ef8f4f561e52bff3d96d6bd43ba6c5dd8 100644 (file)
@@ -446,9 +446,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #define kern_addr_valid(addr)           (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)         \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #include <asm-generic/pgtable.h>
 
 /*
index 838b479a42c4ebf0ea2a1e186a8f08891de37a2e..88d0962de65a8ca97c877b90dfb0b9ac59b0bfac 100644 (file)
@@ -60,6 +60,7 @@ struct hpux_dirent {
 };
 
 struct getdents_callback {
+       struct dir_context ctx;
        struct hpux_dirent __user *current_dir;
        struct hpux_dirent __user *previous;
        int count;
@@ -110,24 +111,23 @@ int hpux_getdents(unsigned int fd, struct hpux_dirent __user *dirent, unsigned i
 {
        struct fd arg;
        struct hpux_dirent __user * lastdirent;
-       struct getdents_callback buf;
+       struct getdents_callback buf = {
+               .ctx.actor = filldir,
+               .current_dir = dirent,
+               .count = count
+       };
        int error;
 
        arg = fdget(fd);
        if (!arg.file)
                return -EBADF;
 
-       buf.current_dir = dirent;
-       buf.previous = NULL;
-       buf.count = count;
-       buf.error = 0;
-
-       error = vfs_readdir(arg.file, filldir, &buf);
+       error = iterate_dir(arg.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               if (put_user(arg.file->f_pos, &lastdirent->d_off))
+               if (put_user(buf.ctx.pos, &lastdirent->d_off))
                        error = -EFAULT;
                else
                        error = count - buf.count;
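
The hpux_getdents() conversion above is one instance of the tree-wide switch from vfs_readdir() to iterate_dir(): the directory position and the callback now travel in a struct dir_context embedded at the head of the caller's private structure. In outline — the names below are hypothetical, only the shape matches the new API:

        struct my_getdents {
                struct dir_context ctx;         /* first member: the actor's cast relies on it */
                /* private cursor fields ... */
        };

        static int my_filldir(void *__buf, const char *name, int namlen,
                              loff_t offset, u64 ino, unsigned int d_type)
        {
                struct my_getdents *buf = __buf;
                /* copy one entry out, update buf->... */
                return 0;
        }

        struct my_getdents buf = { .ctx.actor = my_filldir };
        err = iterate_dir(file, &buf.ctx);      /* buf.ctx.pos replaces file->f_pos */
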
index cc50d33b7b881b7c57f7a46c125a56860840e514..b6b34a0987e7d7aafb42cd47d0c3b8f888007bd8 100644 (file)
@@ -27,7 +27,7 @@ extern struct node_map_data node_data[];
 
 #define PFNNID_SHIFT (30 - PAGE_SHIFT)
 #define PFNNID_MAP_MAX  512     /* support 512GB */
-extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
+extern signed char pfnnid_map[PFNNID_MAP_MAX];
 
 #ifndef CONFIG_64BIT
 #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
@@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn)
        i = pfn >> PFNNID_SHIFT;
        BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
 
-       return (int)pfnnid_map[i];
+       return pfnnid_map[i];
 }
 
 static inline int pfn_valid(int pfn)
index 3234f492d5754b3935d03f73e3ead2bb940747c5..465154076d23f9c1191f6ff7dcc59cf7b61333de 100644 (file)
@@ -225,4 +225,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
        return channel ? 15 : 14;
 }
 
+#define HAVE_PCI_MMAP
+
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+       enum pci_mmap_state mmap_state, int write_combine);
+
 #endif /* __ASM_PARISC_PCI_H */
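
Defining HAVE_PCI_MMAP tells the generic PCI layer that this architecture can back mmap() of PCI resources; the matching pci_mmap_page_range() implementation follows later in this merge. From user space the path is reached through the sysfs resource files, roughly like this (device path and length are illustrative):

        int fd = open("/sys/bus/pci/devices/0000:00:01.0/resource0", O_RDWR | O_SYNC);
        void *bar = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (bar == MAP_FAILED)
                /* fall back or report the error */;
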
index 1e40d7f86be389a3d8c0cff976784fca04489285..34899b5d959aa512e041267ac22302d4f06cb2a6 100644 (file)
@@ -506,9 +506,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 #endif
 
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
 
 /* We provide our own get_unmapped_area to provide cache coherency */
index 9e2d2e408529f744b7bf5b7830cec15ad48a6012..872275659d986f0b4357b35db888740895c2211e 100644 (file)
@@ -1205,6 +1205,7 @@ static struct hp_hardware hp_hardware_list[] = {
        {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 
        {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 
        {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 
+       {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
        {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 
        {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 
        {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 
index 36d7f402e48edb8b8cb13dea9e302d1e84edfdf9..b743a80eaba0311e934eda70c76e7acbf8ae6a78 100644 (file)
@@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm)
 #endif
 
        ldil            L%dcache_stride, %r1
-       ldw             R%dcache_stride(%r1), %r1
+       ldw             R%dcache_stride(%r1), r31
 
 #ifdef CONFIG_64BIT
        depdi,z         1, 63-PAGE_SHIFT,1, %r25
@@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm)
        depwi,z         1, 31-PAGE_SHIFT,1, %r25
 #endif
        add             %r28, %r25, %r25
-       sub             %r25, %r1, %r25
-
-
-1:      fdc,m          %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
+       sub             %r25, r31, %r25
+
+
+1:      fdc,m          r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
        cmpb,COND(<<)           %r28, %r25,1b
-       fdc,m           %r1(%r28)
+       fdc,m           r31(%r28)
 
        sync
 
@@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm)
 #endif
 
        ldil            L%icache_stride, %r1
-       ldw             R%icache_stride(%r1), %r1
+       ldw             R%icache_stride(%r1), %r31
 
 #ifdef CONFIG_64BIT
        depdi,z         1, 63-PAGE_SHIFT,1, %r25
@@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm)
        depwi,z         1, 31-PAGE_SHIFT,1, %r25
 #endif
        add             %r28, %r25, %r25
-       sub             %r25, %r1, %r25
+       sub             %r25, %r31, %r25
 
 
        /* fic only has the type 26 form on PA1.1, requiring an
         * explicit space specification, so use %sr4 */
-1:      fic,m          %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
+1:      fic,m          %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
        cmpb,COND(<<)   %r28, %r25,1b
-       fic,m           %r1(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
 
        sync
 
index 60309051875e9887bcda5e584742ddc843d8585d..64f2764a8cef8778a0602262a5c5b10e4d68975d 100644 (file)
@@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 }
 
 
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+                       enum pci_mmap_state mmap_state, int write_combine)
+{
+       unsigned long prot;
+
+       /*
+        * I/O space can be accessed via normal processor loads and stores on
+        * this platform, but for now we elect not to do this, and portable
+        * drivers should not do this anyway.
+        */
+       if (mmap_state == pci_mmap_io)
+               return -EINVAL;
+
+       if (write_combine)
+               return -EINVAL;
+
+       /*
+        * Ignore write-combine; for now only return uncached mappings.
+        */
+       prot = pgprot_val(vma->vm_page_prot);
+       prot |= _PAGE_NO_CACHE;
+       vma->vm_page_prot = __pgprot(prot);
+
+       return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+               vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
 /*
  * A driver is enabling the device.  We make sure that all the appropriate
  * bits are set to allow the device to operate as the driver is expecting.
index 1e95b2000ce85650903e85a705206ed113540670..7349a3fedfc7644631d153427cef5bb2f69712ff 100644 (file)
@@ -156,7 +156,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 #if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
-       conswitchp = &dummy_con;        /* we use take_over_console() later ! */
+       conswitchp = &dummy_con;        /* we use do_take_over_console() later ! */
 #endif
 
 }
index 1c965642068b48d6b7102a5b6adb365f321f73e6..505b56c6b9b9c6dafa9df67b775c44eb1ebf1867 100644 (file)
@@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt
 
 #ifdef CONFIG_DISCONTIGMEM
 struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
+signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
 #endif
 
 static struct resource data_resource = {
index 2966df604221909d38ddbf404a355540b2109a99..d0ece257d310527743007a60f01f60cc5f684754 100644 (file)
@@ -299,4 +299,53 @@ struct mpc512x_psc_fifo {
 #define rxdata_32 rxdata.rxdata_32
 };
 
+struct mpc5125_psc {
+       u8              mr1;                    /* PSC + 0x00 */
+       u8              reserved0[3];
+       u8              mr2;                    /* PSC + 0x04 */
+       u8              reserved1[3];
+       struct {
+               u16             status;         /* PSC + 0x08 */
+               u8              reserved2[2];
+               u8              clock_select;   /* PSC + 0x0c */
+               u8              reserved3[3];
+       } sr_csr;
+       u8              command;                /* PSC + 0x10 */
+       u8              reserved4[3];
+       union {                                 /* PSC + 0x14 */
+               u8              buffer_8;
+               u16             buffer_16;
+               u32             buffer_32;
+       } buffer;
+       struct {
+               u8              ipcr;           /* PSC + 0x18 */
+               u8              reserved5[3];
+               u8              acr;            /* PSC + 0x1c */
+               u8              reserved6[3];
+       } ipcr_acr;
+       struct {
+               u16             isr;            /* PSC + 0x20 */
+               u8              reserved7[2];
+               u16             imr;            /* PSC + 0x24 */
+               u8              reserved8[2];
+       } isr_imr;
+       u8              ctur;                   /* PSC + 0x28 */
+       u8              reserved9[3];
+       u8              ctlr;                   /* PSC + 0x2c */
+       u8              reserved10[3];
+       u32             ccr;                    /* PSC + 0x30 */
+       u32             ac97slots;              /* PSC + 0x34 */
+       u32             ac97cmd;                /* PSC + 0x38 */
+       u32             ac97data;               /* PSC + 0x3c */
+       u8              reserved11[4];
+       u8              ip;                     /* PSC + 0x44 */
+       u8              reserved12[3];
+       u8              op1;                    /* PSC + 0x48 */
+       u8              reserved13[3];
+       u8              op0;                    /* PSC + 0x4c */
+       u8              reserved14[3];
+       u32             sicr;                   /* PSC + 0x50 */
+       u8              reserved15[4];  /* make eq. sizeof(mpc52xx_psc) */
+};
+
 #endif  /* __ASM_MPC52xx_PSC_H__ */
index 7aeb9555f6eac40a69cfb11bddf924b221add6ee..b6293d26bd39e2d1d9d97ec9775838c23238e4ba 100644 (file)
@@ -198,9 +198,6 @@ extern void paging_init(void);
  */
 #define kern_addr_valid(addr)  (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #include <asm-generic/pgtable.h>
 
 
index eabeec991016b0c6dbbc5d3c4b40d8cf30bfecfb..f46914a0f33ea6fd3cacbdddb76288722dfb42cb 100644 (file)
@@ -994,7 +994,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
                ppc_md.pci_dma_bus_setup(bus);
 }
 
-void pcibios_setup_device(struct pci_dev *dev)
+static void pcibios_setup_device(struct pci_dev *dev)
 {
        /* Fixup NUMA node as it may not be setup yet by the generic
         * code and is needed by the DMA init
@@ -1015,6 +1015,17 @@ void pcibios_setup_device(struct pci_dev *dev)
                ppc_md.pci_irq_fixup(dev);
 }
 
+int pcibios_add_device(struct pci_dev *dev)
+{
+       /*
+        * We can only call pcibios_setup_device() after bus setup is complete,
+        * since some of the platform specific DMA setup code depends on it.
+        */
+       if (dev->bus->is_added)
+               pcibios_setup_device(dev);
+       return 0;
+}
+
 void pcibios_setup_bus_devices(struct pci_bus *bus)
 {
        struct pci_dev *dev;
@@ -1469,10 +1480,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
                if (ppc_md.pcibios_enable_device_hook(dev))
                        return -EINVAL;
 
-       /* avoid pcie irq fix up impact on cardbus */
-       if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
-               pcibios_setup_device(dev);
-
        return pci_enable_resources(dev, mask);
 }
 
index 5cd7ad0c11764ec59432d945b5d824ea05f6603d..1a1b511897733da58ec1a1b79d6c9a66ffdedea2 100644 (file)
@@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                ret = s;
                goto out;
        }
-       kvmppc_lazy_ee_enable();
 
        kvm_guest_enter();
 
@@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        kvmppc_load_guest_fp(vcpu);
 #endif
 
+       kvmppc_lazy_ee_enable();
+
        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
        /* No need for kvm_guest_exit. It's done in handle_exit.
index 237c8e5f2640b79dce83c4bce7f7f66ef09fe02f..77fdd2cef33b5a2c18ac40c442645d9dc38365ad 100644 (file)
@@ -592,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
        do {
                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
-               if (pmd_none_or_clear_bad(pmd))
+               if (!is_hugepd(pmd)) {
+                       /*
+                        * if it is not a hugepd pointer, we should already find
+                        * it cleared.
+                        */
+                       WARN_ON(!pmd_none_or_clear_bad(pmd));
                        continue;
+               }
 #ifdef CONFIG_PPC_FSL_BOOK3E
                /*
                 * Increment next by the size of the huge mapping since
index 35f77a42bedf7e5d1324b170faa51cd1689541d3..f3900427ffab5173ee05041a7375b9633312e835 100644 (file)
@@ -238,7 +238,7 @@ const struct file_operations spufs_context_fops = {
        .release        = spufs_dir_close,
        .llseek         = dcache_dir_lseek,
        .read           = generic_read_dir,
-       .readdir        = dcache_readdir,
+       .iterate        = dcache_readdir,
        .fsync          = noop_fsync,
 };
 EXPORT_SYMBOL_GPL(spufs_context_fops);
index 5a4c87903057f46c6bfb7466ca87fdbc653cb1c0..5ce3ba7ad1372a6de232692e23b3c411bc1ed33f 100644 (file)
@@ -294,8 +294,6 @@ void __init eeh_addr_cache_build(void)
        spin_lock_init(&pci_io_addr_cache_root.piar_lock);
 
        for_each_pci_dev(dev) {
-               eeh_addr_cache_insert_dev(dev);
-
                dn = pci_device_to_OF_node(dev);
                if (!dn)
                        continue;
@@ -308,6 +306,8 @@ void __init eeh_addr_cache_build(void)
                dev->dev.archdata.edev = edev;
                edev->pdev = dev;
 
+               eeh_addr_cache_insert_dev(dev);
+
                eeh_sysfs_add_device(dev);
        }
 
index fe43d1aa2cf1cafcf0b1ef2cae3f9222be7bdce8..9d4a9e8562b2229a7791bb8b069dc13c9c2c9a73 100644 (file)
@@ -639,7 +639,8 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
 
        if (pe->type & EEH_PE_PHB) {
                bus = pe->phb->bus;
-       } else if (pe->type & EEH_PE_BUS) {
+       } else if (pe->type & EEH_PE_BUS ||
+                  pe->type & EEH_PE_DEVICE) {
                edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
                pdev = eeh_dev_to_pci_dev(edev);
                if (pdev)
index 028ac1f71b51a9490f8a3fe0ab889f81b0ca5e62..46ac1ddea6832107b045f0391ca3c4f1d86be579 100644 (file)
@@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
        return indirect_read_config(bus, devfn, offset, len, val);
 }
 
-static struct pci_ops fsl_indirect_pci_ops =
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
+static struct pci_ops fsl_indirect_pcie_ops =
 {
        .read = fsl_indirect_read_config,
        .write = indirect_write_config,
 };
 
-static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
-                                         resource_size_t cfg_addr,
-                                         resource_size_t cfg_data, u32 flags)
-{
-       setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
-       hose->ops = &fsl_indirect_pci_ops;
-}
-
-#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
-
 #define MAX_PHYS_ADDR_BITS     40
 static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
 
@@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
        if (!hose->private_data)
                goto no_bridge;
 
-       fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
-                              PPC_INDIRECT_TYPE_BIG_ENDIAN);
+       setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+                          PPC_INDIRECT_TYPE_BIG_ENDIAN);
 
        if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
                hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 
        if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
+               /* use fsl_indirect_read_config for PCIe */
+               hose->ops = &fsl_indirect_pcie_ops;
                /* For PCIE read HEADER_TYPE to identify controller mode */
                early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
                if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
                if (ret)
                        goto err0;
        } else {
-               fsl_setup_indirect_pci(hose, rsrc_cfg.start,
-                                      rsrc_cfg.start + 4, 0);
+               setup_indirect_pci(hose, rsrc_cfg.start,
+                                  rsrc_cfg.start + 4, 0);
        }
 
        printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
index 886ac7d4937a85c8cbe25d9ca52fd67d8cd28795..2f8c1abeb086999ada3a2938973b8054f112625e 100644 (file)
@@ -50,9 +50,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
+       debug_dma_mapping_error(dev, dma_addr);
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dev, dma_addr);
-       return (dma_addr == 0UL);
+       return (dma_addr == DMA_ERROR_CODE);
 }
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
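
Two things change in this dma_mapping_error() helper: failed mappings are now recognised by comparing against DMA_ERROR_CODE instead of 0, and debug_dma_mapping_error() lets CONFIG_DMA_API_DEBUG record that the driver actually checked the result. The driver-side pattern it supports is the usual one (sketch, not from this patch):

        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;         /* nothing was mapped, nothing to unmap */
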
index e8b6e5b8932c39fa7ff0d824d5c60f4ebe5e5fcf..9aefa3c64eb2d32ab756512e035762613277caa8 100644 (file)
@@ -58,9 +58,6 @@ extern unsigned long zero_page_mask;
 #define __HAVE_COLOR_ZERO_PAGE
 
 /* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)               \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #endif /* !__ASSEMBLY__ */
 
 /*
index d8a6a385d0480dcd46e4dba51642532d83ea7889..feb719d3c85160b586dd6af4d936f363936c4419 100644 (file)
@@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
        .write = reipl_fcp_scpdata_write,
 };
 
-DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
                   reipl_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
                   reipl_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
                   reipl_block_fcp->ipl_info.fcp.bootprog);
@@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = {
 
 /* FCP dump device attributes */
 
-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
                   dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
                   dump_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
                   dump_block_fcp->ipl_info.fcp.bootprog);
index 408e866ae548d3ec75e3e5ac672e61531d06b952..dd3c1994b8bd405c5fe986d99f5746893fa585a5 100644 (file)
@@ -312,6 +312,7 @@ void measurement_alert_subclass_unregister(void)
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);
 
+#ifdef CONFIG_SMP
 void synchronize_irq(unsigned int irq)
 {
        /*
@@ -320,6 +321,7 @@ void synchronize_irq(unsigned int irq)
         */
 }
 EXPORT_SYMBOL_GPL(synchronize_irq);
+#endif
 
 #ifndef CONFIG_PCI
 
index 3cbd3b8bf3113c20111a96e22d62ed495aefa1e2..cca388253a39f5636af7e10f0d416589abca59df 100644 (file)
@@ -123,7 +123,8 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
                        continue;
                } else if ((addr <= chunk->addr) &&
                           (addr + size >= chunk->addr + chunk->size)) {
-                       memset(chunk, 0 , sizeof(*chunk));
+                       memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
+                       memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
                } else if (addr + size < chunk->addr + chunk->size) {
                        chunk->size =  chunk->addr + chunk->size - addr - size;
                        chunk->addr = addr + size;
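
The fix above stops punching a zeroed hole into the middle of the chunk array and instead compacts it, keeping the used entries contiguous. The underlying idiom for deleting entry i from a fixed array with n used slots is simply (sketch):

        memmove(&arr[i], &arr[i + 1], (n - i - 1) * sizeof(arr[0]));
        memset(&arr[n - 1], 0, sizeof(arr[0]));         /* clear the now-stale last slot */
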
index 2fd469807683fadfe15449e2474ec1f35f2fb921..db96ad9afc03ed6c64246887c3257f64c8464f65 100644 (file)
@@ -113,9 +113,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #define pte_clear(mm, addr, xp)                \
        do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * The "pgd_xxx()" functions here are trivial for a folded two-level
  * setup: the pgd is never bad, and a pmd always exists (as it's folded
index 9210e93a92c337f18c94fd962bbeb5e23cfbb739..cf434c64408dd424b55bdf0bdb629fa4f7e137fd 100644 (file)
@@ -124,9 +124,6 @@ typedef pte_t *pte_addr_t;
 
 #define kern_addr_valid(addr)  (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define pte_pfn(x)             ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 
 /*
index ff18e3cfb6b1ffe3e75df94fdd68319b912ab1d8..7e4a97fbded412158d845926026987290907902c 100644 (file)
@@ -6,6 +6,7 @@ generic-y += cputime.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
+generic-y += linkage.h
 generic-y += local64.h
 generic-y += mutex.h
 generic-y += irq_regs.h
index 15a716934e4dd790d589678af3ae58369d81ddf6..b836e9297f2a71c3084df4cd695109474856d72d 100644 (file)
@@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void)
 
 #ifdef CONFIG_SMP
 # define LEON3_IRQ_IPI_DEFAULT         13
-# define LEON3_IRQ_TICKER              (leon3_ticker_irq)
+# define LEON3_IRQ_TICKER              (leon3_gptimer_irq)
 # define LEON3_IRQ_CROSS_CALL          15
 #endif
 
index f3034eddf4682569c257a2ce19941b013a98c218..24ec48c3ff90ae57125c5e768471195307c60efc 100644 (file)
@@ -47,6 +47,7 @@ struct amba_prom_registers {
 #define LEON3_GPTIMER_LD 4
 #define LEON3_GPTIMER_IRQEN 8
 #define LEON3_GPTIMER_SEPIRQ 8
+#define LEON3_GPTIMER_TIMERS 0x7
 
 #define LEON23_REG_TIMER_CONTROL_EN    0x00000001 /* 1 = enable counting */
 /* 0 = hold scalar and counter */
diff --git a/arch/sparc/include/asm/linkage.h b/arch/sparc/include/asm/linkage.h
deleted file mode 100644 (file)
index 291c2d0..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_LINKAGE_H
-#define __ASM_LINKAGE_H
-
-/* Nothing to see here... */
-
-#endif
index 6fc13483f70222cb3ea1da047a005447b80cc0cb..502f632f6cc73e826ded589a87c70e7e80b6d514 100644 (file)
@@ -443,6 +443,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 
        return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
 }
+#define io_remap_pfn_range io_remap_pfn_range 
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
index 7619f2f792aff549905ca49d1a70b3cd7514979c..79c214efa3febd16e442f6cb6655af8255bea415 100644 (file)
@@ -914,6 +914,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 
        return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
 }
+#define io_remap_pfn_range io_remap_pfn_range 
 
 #include <asm/tlbflush.h>
 #include <asm-generic/pgtable.h>
index 75bb608c423eacb459321c5628ee45d6ce769ba6..5ef48dab563694db95935564e5df92172fd67bab 100644 (file)
@@ -843,7 +843,8 @@ void ldom_reboot(const char *boot_command)
                unsigned long len;
 
                strcpy(full_boot_str, "boot ");
-               strcpy(full_boot_str + strlen("boot "), boot_command);
+               strlcpy(full_boot_str + strlen("boot "), boot_command,
+                       sizeof(full_boot_str) - strlen("boot "));
                len = strlen(full_boot_str);
 
                if (reboot_data_supported) {
index 7c0231dabe445f1021190aafe4527b68c113a421..b7c68976cbc7568360dfde49044f70a4aac9ffb7 100644 (file)
@@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(leon_irq_lock);
 
 unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
 unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
-int leon3_ticker_irq; /* Timer ticker IRQ */
 unsigned int sparc_leon_eirq;
 #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
 #define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -278,6 +277,9 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
 
        leon_clear_profile_irq(cpu);
 
+       if (cpu == boot_cpu_id)
+               timer_interrupt(irq, NULL);
+
        ce = &per_cpu(sparc32_clockevent, cpu);
 
        irq_enter();
@@ -299,6 +301,7 @@ void __init leon_init_timers(void)
        int icsel;
        int ampopts;
        int err;
+       u32 config;
 
        sparc_config.get_cycles_offset = leon_cycles_offset;
        sparc_config.cs_period = 1000000 / HZ;
@@ -377,23 +380,6 @@ void __init leon_init_timers(void)
        LEON3_BYPASS_STORE_PA(
                        &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
 
-#ifdef CONFIG_SMP
-       leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
-
-       if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
-             (1<<LEON3_GPTIMER_SEPIRQ))) {
-               printk(KERN_ERR "timer not configured with separate irqs\n");
-               BUG();
-       }
-
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
-                               0);
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
-                               (((1000000/HZ) - 1)));
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
-                               0);
-#endif
-
        /*
         * The IRQ controller may (if implemented) consist of multiple
         * IRQ controllers, each mapped on a 4Kb boundary.
@@ -416,13 +402,6 @@ void __init leon_init_timers(void)
        if (eirq != 0)
                leon_eirq_setup(eirq);
 
-       irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
-       err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
-       if (err) {
-               printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
-               prom_halt();
-       }
-
 #ifdef CONFIG_SMP
        {
                unsigned long flags;
@@ -439,30 +418,31 @@ void __init leon_init_timers(void)
        }
 #endif
 
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
-                             LEON3_GPTIMER_EN |
-                             LEON3_GPTIMER_RL |
-                             LEON3_GPTIMER_LD |
-                             LEON3_GPTIMER_IRQEN);
+       config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
+       if (config & (1 << LEON3_GPTIMER_SEPIRQ))
+               leon3_gptimer_irq += leon3_gptimer_idx;
+       else if ((config & LEON3_GPTIMER_TIMERS) > 1)
+               pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");
 
 #ifdef CONFIG_SMP
        /* Install per-cpu IRQ handler for broadcasted ticker */
-       irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
+       irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
                                    "per-cpu", 0);
        err = request_irq(irq, leon_percpu_timer_ce_interrupt,
-                         IRQF_PERCPU | IRQF_TIMER, "ticker",
-                         NULL);
+                         IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
+#else
+       irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
+       err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
+#endif
        if (err) {
-               printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
+               pr_err("Unable to attach timer IRQ%d\n", irq);
                prom_halt();
        }
-
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
+       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
                              LEON3_GPTIMER_EN |
                              LEON3_GPTIMER_RL |
                              LEON3_GPTIMER_LD |
                              LEON3_GPTIMER_IRQEN);
-#endif
        return;
 bad:
        printk(KERN_ERR "No Timer/irqctrl found\n");
index 7739a54315e2dfdf71f5fff8c21f407046059892..6df26e37f8790c700c21a6b5651c4c84cf2ad874 100644 (file)
@@ -536,11 +536,9 @@ static int grpci1_of_probe(struct platform_device *ofdev)
 
        /* find device register base address */
        res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
-       regs = devm_request_and_ioremap(&ofdev->dev, res);
-       if (!regs) {
-               dev_err(&ofdev->dev, "io-regs mapping failed\n");
-               return -EADDRNOTAVAIL;
-       }
+       regs = devm_ioremap_resource(&ofdev->dev, res);
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
 
        /*
         * check that we're in Host Slot and that we can act as a Host Bridge
index bdf53d9a8d460041e9237ebbfd219bb180879a86..b0b3967a2dd2d1e6ca46571d635a43b02e38fa75 100644 (file)
@@ -47,6 +47,10 @@ void pmc_leon_idle_fixup(void)
         * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
         */
        register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+
+       /* Interrupts need to be enabled to not hang the CPU */
+       local_irq_enable();
+
        __asm__ __volatile__ (
                "wr     %%g0, %%asr19\n"
                "lda    [%0] %1, %%g0\n"
@@ -60,6 +64,9 @@ void pmc_leon_idle_fixup(void)
  */
 void pmc_leon_idle(void)
 {
+       /* Interrupts need to be enabled to not hang the CPU */
+       local_irq_enable();
+
        /* For systems without power-down, this will be no-op */
        __asm__ __volatile__ ("wr       %g0, %asr19\n\t");
 }
index baf4366e2d6afe937db5fe792c3183d56da4c580..2031c65fd4ea2ec209460bf3d7c0b5bf0aa4a283 100644 (file)
@@ -773,15 +773,6 @@ static int __pci_mmap_make_offset(struct pci_dev *pdev,
        return 0;
 }
 
-/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
- * mapping.
- */
-static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
-                                           enum pci_mmap_state mmap_state)
-{
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-}
-
 /* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
  * device mapping.
  */
@@ -809,7 +800,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
        if (ret < 0)
                return ret;
 
-       __pci_mmap_set_flags(dev, vma, mmap_state);
        __pci_mmap_set_pgprot(dev, vma, mmap_state);
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
index 38bf80a22f02396704fbb0f69ccef74fd83d689b..1434526970a6bf446a495d275eab9f46073a4172 100644 (file)
@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)
 
        /* Initialize PROM console and command line. */
        *cmdline_p = prom_getbootargs();
-       strcpy(boot_command_line, *cmdline_p);
+       strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
        parse_early_param();
 
        boot_flags_init(*cmdline_p);
index 88a127b9c69e8b1876a94b60db94456eca6fe5ef..13785547e435274bb6fa079968a9d516dc25800f 100644 (file)
@@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p)
 {
        /* Initialize PROM console and command line. */
        *cmdline_p = prom_getbootargs();
-       strcpy(boot_command_line, *cmdline_p);
+       strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
        parse_early_param();
 
        boot_flags_init(*cmdline_p);
index a7171997adfdd4b7b5bf9154528b94c31b0a3bb3..04fd55a6e4613ae009d6f53abce4fbf3f043bc8e 100644 (file)
@@ -1098,7 +1098,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
                m->size = *val;
                val = mdesc_get_property(md, node,
                                         "address-congruence-offset", NULL);
-               m->offset = *val;
+
+               /* The address-congruence-offset property is optional.
+                * Explicitly zero it to identify this case.
+                */
+               if (val)
+                       m->offset = *val;
+               else
+                       m->offset = 0UL;
 
                numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
                        count - 1, m->base, m->size, m->offset);
index 83d89bcb44afcace24b757b02ce5305c1e762afe..37e7bc4c95b373aad3c9dbac1f16fe172b205890 100644 (file)
@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
        }
 
        if (!tb->active) {
-               global_flush_tlb_page(mm, vaddr);
                flush_tsb_user_page(mm, vaddr);
+               global_flush_tlb_page(mm, vaddr);
                goto out;
        }
 
index f5ec32e0d419b550313c71c344816808da7a4ab5..d2b49d2365e76d962db243aca29735d85d564dfb 100644 (file)
@@ -23,23 +23,25 @@ prom_getbootargs(void)
                return barg_buf;
        }
 
-       switch(prom_vers) {
+       switch (prom_vers) {
        case PROM_V0:
                cp = barg_buf;
                /* Start from 1 and go over fd(0,0,0)kernel */
-               for(iter = 1; iter < 8; iter++) {
+               for (iter = 1; iter < 8; iter++) {
                        arg = (*(romvec->pv_v0bootargs))->argv[iter];
                        if (arg == NULL)
                                break;
-                       while(*arg != 0) {
+                       while (*arg != 0) {
                                /* Leave place for space and null. */
-                               if(cp >= barg_buf + BARG_LEN-2){
+                               if (cp >= barg_buf + BARG_LEN - 2)
                                        /* We might issue a warning here. */
                                        break;
-                               }
                                *cp++ = *arg++;
                        }
                        *cp++ = ' ';
+                       if (cp >= barg_buf + BARG_LEN - 1)
+                               /* We might issue a warning here. */
+                               break;
                }
                *cp = 0;
                break;
index 92204c3800b57afe3366ffa49121001323c06243..bd1b2a3ac34e3a9cd82df45c4fedb86b34614ef0 100644 (file)
@@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node)
        return prom_node_to_node("child", node);
 }
 
-inline phandle prom_getchild(phandle node)
+phandle prom_getchild(phandle node)
 {
        phandle cnode;
 
@@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node)
        return prom_node_to_node(prom_peer_name, node);
 }
 
-inline phandle prom_getsibling(phandle node)
+phandle prom_getsibling(phandle node)
 {
        phandle sibnode;
 
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling);
 /* Return the length in bytes of property 'prop' at node 'node'.
  * Return -1 on error.
  */
-inline int prom_getproplen(phandle node, const char *prop)
+int prom_getproplen(phandle node, const char *prop)
 {
        unsigned long args[6];
 
@@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen);
  * 'buffer' which has a size of 'bufsize'.  If the acquisition
  * was successful the length will be returned, else -1 is returned.
  */
-inline int prom_getproperty(phandle node, const char *prop,
-                           char *buffer, int bufsize)
+int prom_getproperty(phandle node, const char *prop,
+                    char *buffer, int bufsize)
 {
        unsigned long args[8];
        int plen;
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty);
 /* Acquire an integer property and return its value.  Returns -1
  * on failure.
  */
-inline int prom_getint(phandle node, const char *prop)
+int prom_getint(phandle node, const char *prop)
 {
        int intprop;
 
@@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop";
 /* Return the first property type for node 'node'.
  * buffer should be at least 32B in length
  */
-inline char *prom_firstprop(phandle node, char *buffer)
+char *prom_firstprop(phandle node, char *buffer)
 {
        unsigned long args[7];
 
@@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop);
  * at node 'node' .  Returns NULL string if no more
  * property types for this node.
  */
-inline char *prom_nextprop(phandle node, const char *oprop, char *buffer)
+char *prom_nextprop(phandle node, const char *oprop, char *buffer)
 {
        unsigned long args[7];
        char buf[32];
index 73b1a4c9ad03f22c218f40896cbb2fdacf3142ce..33587f16c1527ea0db284514564abfb8379e6eb9 100644 (file)
@@ -362,9 +362,6 @@ do {                                                \
 #define kern_addr_valid(addr)  (1)
 #endif /* CONFIG_FLATMEM */
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 extern void vmalloc_sync_all(void);
 
 #endif /* !__ASSEMBLY__ */
index 4385cb6fa00ade132dc1ba9fb3fda09fbc775151..a93b02a252227c3b8bd40929e663e79f3f757862 100644 (file)
@@ -84,4 +84,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
 EXPORT_SYMBOL(__ashrdi3);
 uint64_t __ashldi3(uint64_t, unsigned int);
 EXPORT_SYMBOL(__ashldi3);
+int __ffsdi2(uint64_t);
+EXPORT_SYMBOL(__ffsdi2);
 #endif
index d7d21851e60c12f0c3f64933a8c8e2856586ac46..3df3bd544492012ab9b78b0c43ab2648a1760e61 100644 (file)
@@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req)
        }
 
        do {
-               loff_t pos;
+               loff_t pos = file->f_pos;
                mm_segment_t old_fs = get_fs();
                set_fs(KERNEL_DS);
                len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);
index ae02909a18752adebe5657aef42c620a629d3293..bf974f712af7e9929fb6ee4908eda145b1fe1a14 100644 (file)
@@ -69,8 +69,6 @@ extern unsigned long end_iomem;
 #define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_KERNEL_EXEC       __pgprot(__PAGE_KERNEL_EXEC)
 
-#define io_remap_pfn_range     remap_pfn_range
-
 /*
  * The i386 can't do page protection for execute, and considers that the same
  * are read.
index 68b2f297ac97c902843aa4e7b172639ebe2c7c4d..233c25880df403390d0c191c97a486d3e02a1ae5 100644 (file)
@@ -303,13 +303,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #include <asm-generic/pgtable.h>
 
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#define io_remap_pfn_range(vma, from, pfn, size, prot) \
-               remap_pfn_range(vma, from, pfn, size, prot)
-
 #define pgtable_cache_init() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
index 685692c94f051a8a7ad582442efcc3fa11173332..fe120da25625b23ef52c74b479bf715f265261b4 100644 (file)
@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt"
 config IA32_EMULATION
        bool "IA32 Emulation"
        depends on X86_64
+       select BINFMT_ELF
        select COMPAT_BINFMT_ELF
        select HAVE_UID16
        ---help---
index 62fe22cd4cba5d33ec2434c79715267ff7031241..477e9d75149b8c62a0c971e37108b6b03ffa7668 100644 (file)
@@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8)
        addq %rcx, KEYP
 
        movdqa IV, STATE1
-       pxor 0x00(INP), STATE1
+       movdqu 0x00(INP), INC
+       pxor INC, STATE1
        movdqu IV, 0x00(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE2
-       pxor 0x10(INP), STATE2
+       movdqu 0x10(INP), INC
+       pxor INC, STATE2
        movdqu IV, 0x10(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE3
-       pxor 0x20(INP), STATE3
+       movdqu 0x20(INP), INC
+       pxor INC, STATE3
        movdqu IV, 0x20(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE4
-       pxor 0x30(INP), STATE4
+       movdqu 0x30(INP), INC
+       pxor INC, STATE4
        movdqu IV, 0x30(OUTP)
 
        call *%r11
 
-       pxor 0x00(OUTP), STATE1
+       movdqu 0x00(OUTP), INC
+       pxor INC, STATE1
        movdqu STATE1, 0x00(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE1
-       pxor 0x40(INP), STATE1
+       movdqu 0x40(INP), INC
+       pxor INC, STATE1
        movdqu IV, 0x40(OUTP)
 
-       pxor 0x10(OUTP), STATE2
+       movdqu 0x10(OUTP), INC
+       pxor INC, STATE2
        movdqu STATE2, 0x10(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE2
-       pxor 0x50(INP), STATE2
+       movdqu 0x50(INP), INC
+       pxor INC, STATE2
        movdqu IV, 0x50(OUTP)
 
-       pxor 0x20(OUTP), STATE3
+       movdqu 0x20(OUTP), INC
+       pxor INC, STATE3
        movdqu STATE3, 0x20(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE3
-       pxor 0x60(INP), STATE3
+       movdqu 0x60(INP), INC
+       pxor INC, STATE3
        movdqu IV, 0x60(OUTP)
 
-       pxor 0x30(OUTP), STATE4
+       movdqu 0x30(OUTP), INC
+       pxor INC, STATE4
        movdqu STATE4, 0x30(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE4
-       pxor 0x70(INP), STATE4
+       movdqu 0x70(INP), INC
+       pxor INC, STATE4
        movdqu IV, 0x70(OUTP)
 
        _aesni_gf128mul_x_ble()
@@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8)
 
        call *%r11
 
-       pxor 0x40(OUTP), STATE1
+       movdqu 0x40(OUTP), INC
+       pxor INC, STATE1
        movdqu STATE1, 0x40(OUTP)
 
-       pxor 0x50(OUTP), STATE2
+       movdqu 0x50(OUTP), INC
+       pxor INC, STATE2
        movdqu STATE2, 0x50(OUTP)
 
-       pxor 0x60(OUTP), STATE3
+       movdqu 0x60(OUTP), INC
+       pxor INC, STATE3
        movdqu STATE3, 0x60(OUTP)
 
-       pxor 0x70(OUTP), STATE4
+       movdqu 0x70(OUTP), INC
+       pxor INC, STATE4
        movdqu STATE4, 0x70(OUTP)
 
        ret
index 805078e080133bbdb1eab29f9ff742ba55a54955..52ff81cce008e6dbaed3d3a3b290ea6c88412897 100644 (file)
@@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
        /* struct user */
        DUMP_WRITE(&dump, sizeof(dump));
        /* Now dump all of the user data.  Include malloced stuff as well */
-       DUMP_SEEK(PAGE_SIZE);
+       DUMP_SEEK(PAGE_SIZE - sizeof(dump));
        /* now we start writing out the user space info */
        set_fs(USER_DS);
        /* Dump the data area */
index ba870bb6dd8ef30ab81a317a8eb43dcb83066630..57873beb32927426b3f5c5c97284729195d2c2e5 100644 (file)
@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 
 extern void init_ISA_irqs(void);
 
+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
 #endif /* _ASM_X86_IRQ_H */
index 6825e2efd1b411fdc8090e8218f391a93a8b6b91..6bc3985ee473d52ae74f16a2cec2e1fbd2fb693f 100644 (file)
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern __init void load_ucode_ap(void);
+extern void __cpuinit load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline __init void load_ucode_ap(void) {}
+static inline void __cpuinit load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
        return 0;
index c0fa356e90de24ea60ba8a0ac90c39e6bb05d73b..86f9301903c818c632b227537b44142b71883c8d 100644 (file)
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
                        void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */
 
 #define NMI_FLAG_FIRST 1
 
index 1e672234c4ffd1de71117d76ccc122801fb9e9de..5b0818bc89635cb6e27d5793fb4a32f83866c192 100644 (file)
@@ -506,9 +506,6 @@ static inline unsigned long pages_to_mb(unsigned long npg)
        return npg >> (20 - PAGE_SHIFT);
 }
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #if PAGETABLE_LEVELS > 2
 static inline int pud_none(pud_t pud)
 {
index 31cb9ae992b7d7921b03bd6173068bae3138a06e..a698d7165c96a5b8fc0c08c8ee7f419b52933b08 100644 (file)
@@ -9,6 +9,7 @@
  *
  */
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 #include <linux/cpumask.h>
 #include <linux/kdebug.h>
index 35ffda5d0727d19b11cccccdde0e0d461b9e15ee..5f90b85ff22e584be8d1d7eb1615acde7584b397 100644 (file)
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
        if (mtrr_tom2)
                x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
 
-       nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
        /*
         * [0, 1M) should always be covered by var mtrr with WB
         * and fixed mtrrs should take effect before var mtrr for it:
         */
-       nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+       nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
                                        1ULL<<(20 - PAGE_SHIFT));
-       /* Sort the ranges: */
-       sort_range(range, nr_range);
+       /* add from var mtrr at last */
+       nr_range = x86_get_mtrr_mem_range(range, nr_range,
+                                         x_remove_base, x_remove_size);
 
        range_sums = sum_ranges(range, nr_range);
        printk(KERN_INFO "total RAM covered: %ldM\n",
index f60d41ff9a97fba11bf28808a65ed775e1605666..a9e22073bd56a755ea1952dea202eba63e350f7f 100644 (file)
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-       INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
        EVENT_EXTRA_END
 };
 
 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+       INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
        EVENT_EXTRA_END
 };
 
index 9895a9a41380fe8d92666f5e1a06738ce9cbd809..211bce445522d541cc1bcc05e44e401b376f8093 100644 (file)
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
        return insn.length;
 }
 
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+static int __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+       int ret;
+
        /* Copy an instruction, recovering it if another optprobe has modified it. */
-       __copy_instruction(p->ainsn.insn, p->addr);
+       ret = __copy_instruction(p->ainsn.insn, p->addr);
+       if (!ret)
+               return -EINVAL;
 
        /*
         * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 
        /* Also, displacement change doesn't affect the first byte */
        p->opcode = p->ainsn.insn[0];
+
+       return 0;
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn)
                return -ENOMEM;
-       arch_copy_kprobe(p);
-       return 0;
+
+       return arch_copy_kprobe(p);
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
index d2c381280e3cfd1a3b462ab0d43320f424e1e7bc..3dd37ebd591b36db493d449506d33a6b8915841f 100644 (file)
@@ -242,6 +242,7 @@ void __init kvmclock_init(void)
        if (!mem)
                return;
        hv_clock = __va(mem);
+       memset(hv_clock, 0, size);
 
        if (kvm_register_clock("boot clock")) {
                hv_clock = NULL;
index 4e7a37ff03ab9f6aba634be08f7650e1ef5f2f7c..81a5f5e8f142a867b56a5e335d1b2a6598a8fbbe 100644 (file)
@@ -277,18 +277,6 @@ void exit_idle(void)
 }
 #endif
 
-void arch_cpu_idle_prepare(void)
-{
-       /*
-        * If we're the non-boot CPU, nothing set the stack canary up
-        * for us.  CPU0 already has it initialized but no harm in
-        * doing it again.  This is a good place for updating it, as
-        * we wont ever return from this function (so the invalid
-        * canaries already on the stack wont ever trigger).
-        */
-       boot_init_stack_canary();
-}
-
 void arch_cpu_idle_enter(void)
 {
        local_touch_nmi();
index 9c73b51817e4743264ef7c9abd01b72e21b98a8e..bfd348e9936926f56e13ffee8b67cc77ec4a4338 100644 (file)
@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
-       bool has_mc = boot_cpu_data.x86_max_cores > 1;
        bool has_smt = smp_num_siblings > 1;
+       bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct cpuinfo_x86 *o;
        int i;
 
        cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
-       if (!has_smt && !has_mc) {
+       if (!has_mp) {
                cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
                cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
                cpumask_set_cpu(cpu, cpu_core_mask(cpu));
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                if ((i == cpu) || (has_smt && match_smt(c, o)))
                        link_mask(sibling, cpu, i);
 
-               if ((i == cpu) || (has_mc && match_llc(c, o)))
+               if ((i == cpu) || (has_mp && match_llc(c, o)))
                        link_mask(llc_shared, cpu, i);
 
        }
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
        for_each_cpu(i, cpu_sibling_setup_mask) {
                o = &cpu_data(i);
 
-               if ((i == cpu) || (has_mc && match_mc(c, o))) {
+               if ((i == cpu) || (has_mp && match_mc(c, o))) {
                        link_mask(core, cpu, i);
 
                        /*
index 094b5d96ab1468c1875a2a2a5e682245f2909db9..e8ba99c341808d6069b0b4f6985b8d71ef243971 100644 (file)
@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
        xcr0 = xcr;
-       if (kvm_x86_ops->get_cpl(vcpu) != 0)
-               return 1;
        if (!(xcr0 & XSTATE_FP))
                return 1;
        if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-       if (__kvm_set_xcr(vcpu, index, xcr)) {
+       if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+           __kvm_set_xcr(vcpu, index, xcr)) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
index 5ae2eb09419ec54d2ac7618904c31d961149df43..d2fbcedcf6eaf2fd77179075c1be2c5c4d3d8347 100644 (file)
@@ -1069,7 +1069,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
                 * that by attempting to use more space than is available.
                 */
                unsigned long dummy_size = remaining_size + 1024;
-               void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
+               void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
+
+               if (!dummy)
+                       return EFI_OUT_OF_RESOURCES;
 
                status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
                                          EFI_VARIABLE_NON_VOLATILE |
@@ -1089,6 +1092,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
                                         0, dummy);
                }
 
+               kfree(dummy);
+
                /*
                 * The runtime code may now have triggered a garbage collection
                 * run, so check the variable info again
index d7546c94da520625d7ae02c65c8e05e025535816..8f017eb309bda442652463f0fb4d8f1732c145d6 100644 (file)
@@ -393,14 +393,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 extern  void update_mmu_cache(struct vm_area_struct * vma,
                              unsigned long address, pte_t *ptep);
 
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-       remap_pfn_range(vma, from, pfn, size, prot)
-
 typedef pte_t *pte_addr_t;
 
 #endif /* !defined (__ASSEMBLY__) */
index 769219b293098d43bd8406e296ff8b388869e8c1..76fc0b23fc6cfd4c33188cd6e5b7454603b7a751 100644 (file)
@@ -45,10 +45,9 @@ struct cryptomgr_param {
                } nu32;
        } attrs[CRYPTO_MAX_ATTRS];
 
-       char larval[CRYPTO_MAX_ALG_NAME];
        char template[CRYPTO_MAX_ALG_NAME];
 
-       struct completion *completion;
+       struct crypto_larval *larval;
 
        u32 otype;
        u32 omask;
@@ -87,7 +86,8 @@ static int cryptomgr_probe(void *data)
        crypto_tmpl_put(tmpl);
 
 out:
-       complete_all(param->completion);
+       complete_all(&param->larval->completion);
+       crypto_alg_put(&param->larval->alg);
        kfree(param);
        module_put_and_exit(0);
 }
@@ -187,18 +187,19 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
        param->otype = larval->alg.cra_flags;
        param->omask = larval->mask;
 
-       memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);
-
-       param->completion = &larval->completion;
+       crypto_alg_get(&larval->alg);
+       param->larval = larval;
 
        thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
        if (IS_ERR(thread))
-               goto err_free_param;
+               goto err_put_larval;
 
        wait_for_completion_interruptible(&larval->completion);
 
        return NOTIFY_STOP;
 
+err_put_larval:
+       crypto_alg_put(&larval->alg);
 err_free_param:
        kfree(param);
 err_put_module:
index 033a7147e5ebc4317b3bf082f090e8f2c2c8c115..3b6180336d3d54b9011bb8f90d15a220cf06b642 100644 (file)
@@ -34,12 +34,6 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
 BLOCKING_NOTIFIER_HEAD(crypto_chain);
 EXPORT_SYMBOL_GPL(crypto_chain);
 
-static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
-{
-       atomic_inc(&alg->cra_refcnt);
-       return alg;
-}
-
 struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
 {
        return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
index 9ebedae3fb54abc26a97918b3fe7aaabb2b56b82..bd39bfc92eabc1acf465e1cf8614d644eaafb402 100644 (file)
@@ -103,6 +103,12 @@ int crypto_register_notifier(struct notifier_block *nb);
 int crypto_unregister_notifier(struct notifier_block *nb);
 int crypto_probing_notify(unsigned long val, void *v);
 
+static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
+{
+       atomic_inc(&alg->cra_refcnt);
+       return alg;
+}
+
 static inline void crypto_alg_put(struct crypto_alg *alg)
 {
        if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
index 652fd5ce303c4a9efdbfa3f6d4eb330fba42a5bd..cab13f2fc28e3033aaed24adbbf618199473bb28 100644 (file)
@@ -164,15 +164,24 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        if (dev_desc->clk_required) {
                ret = register_device_clock(adev, pdata);
                if (ret) {
-                       /*
-                        * Skip the device, but don't terminate the namespace
-                        * scan.
-                        */
-                       kfree(pdata);
-                       return 0;
+                       /* Skip the device, but continue the namespace scan. */
+                       ret = 0;
+                       goto err_out;
                }
        }
 
+       /*
+        * This works around a known issue in ACPI tables where LPSS devices
+        * have _PS0 and _PS3 without _PSC (and no power resources), so
+        * acpi_bus_init_power() will assume that the BIOS has put them into D0.
+        */
+       ret = acpi_device_fix_up_power(adev);
+       if (ret) {
+               /* Skip the device, but continue the namespace scan. */
+               ret = 0;
+               goto err_out;
+       }
+
        adev->driver_data = pdata;
        ret = acpi_create_platform_device(adev, id);
        if (ret > 0)
index 318fa32a141ec41c70c273b1f57f966f38fd55dc..31c217a42839dce40ee2039cd38f36d935bda543 100644 (file)
@@ -290,6 +290,26 @@ int acpi_bus_init_power(struct acpi_device *device)
        return 0;
 }
 
+/**
+ * acpi_device_fix_up_power - Force device with missing _PSC into D0.
+ * @device: Device object whose power state is to be fixed up.
+ *
+ * Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
+ * are assumed to be put into D0 by the BIOS.  However, in some cases that may
+ * not be the case and this function should be used then.
+ */
+int acpi_device_fix_up_power(struct acpi_device *device)
+{
+       int ret = 0;
+
+       if (!device->power.flags.power_resources
+           && !device->power.flags.explicit_get
+           && device->power.state == ACPI_STATE_D0)
+               ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
+
+       return ret;
+}
+
 int acpi_bus_update_power(acpi_handle handle, int *state_p)
 {
        struct acpi_device *device;
index 4fdea381ef21e07017a87b94ae16ce0f8860fb75..14de9f46972ee798926e9a1805c356fc02bb291e 100644 (file)
@@ -66,20 +66,21 @@ struct dock_station {
        spinlock_t dd_lock;
        struct mutex hp_lock;
        struct list_head dependent_devices;
-       struct list_head hotplug_devices;
 
        struct list_head sibling;
        struct platform_device *dock_device;
 };
 static LIST_HEAD(dock_stations);
 static int dock_station_count;
+static DEFINE_MUTEX(hotplug_lock);
 
 struct dock_dependent_device {
        struct list_head list;
-       struct list_head hotplug_list;
        acpi_handle handle;
-       const struct acpi_dock_ops *ops;
-       void *context;
+       const struct acpi_dock_ops *hp_ops;
+       void *hp_context;
+       unsigned int hp_refcount;
+       void (*hp_release)(void *);
 };
 
 #define DOCK_DOCKING   0x00000001
@@ -111,7 +112,6 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
 
        dd->handle = handle;
        INIT_LIST_HEAD(&dd->list);
-       INIT_LIST_HEAD(&dd->hotplug_list);
 
        spin_lock(&ds->dd_lock);
        list_add_tail(&dd->list, &ds->dependent_devices);
@@ -121,35 +121,90 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
 }
 
 /**
- * dock_add_hotplug_device - associate a hotplug handler with the dock station
- * @ds: The dock station
- * @dd: The dependent device struct
- *
- * Add the dependent device to the dock's hotplug device list
+ * dock_init_hotplug - Initialize a hotplug device on a docking station.
+ * @dd: Dock-dependent device.
+ * @ops: Dock operations to attach to the dependent device.
+ * @context: Data to pass to the @ops callbacks and @release.
+ * @init: Optional initialization routine to run after setting up context.
+ * @release: Optional release routine to run on removal.
  */
-static void
-dock_add_hotplug_device(struct dock_station *ds,
-                       struct dock_dependent_device *dd)
+static int dock_init_hotplug(struct dock_dependent_device *dd,
+                            const struct acpi_dock_ops *ops, void *context,
+                            void (*init)(void *), void (*release)(void *))
 {
-       mutex_lock(&ds->hp_lock);
-       list_add_tail(&dd->hotplug_list, &ds->hotplug_devices);
-       mutex_unlock(&ds->hp_lock);
+       int ret = 0;
+
+       mutex_lock(&hotplug_lock);
+
+       if (dd->hp_context) {
+               ret = -EEXIST;
+       } else {
+               dd->hp_refcount = 1;
+               dd->hp_ops = ops;
+               dd->hp_context = context;
+               dd->hp_release = release;
+       }
+
+       if (!WARN_ON(ret) && init)
+               init(context);
+
+       mutex_unlock(&hotplug_lock);
+       return ret;
 }
 
 /**
- * dock_del_hotplug_device - remove a hotplug handler from the dock station
- * @ds: The dock station
- * @dd: the dependent device struct
+ * dock_release_hotplug - Decrement hotplug reference counter of dock device.
+ * @dd: Dock-dependent device.
  *
- * Delete the dependent device from the dock's hotplug device list
+ * Decrement the reference counter of @dd and if 0, detach its hotplug
+ * operations from it, reset its context pointer and run the optional release
+ * routine if present.
  */
-static void
-dock_del_hotplug_device(struct dock_station *ds,
-                       struct dock_dependent_device *dd)
+static void dock_release_hotplug(struct dock_dependent_device *dd)
 {
-       mutex_lock(&ds->hp_lock);
-       list_del(&dd->hotplug_list);
-       mutex_unlock(&ds->hp_lock);
+       void (*release)(void *) = NULL;
+       void *context = NULL;
+
+       mutex_lock(&hotplug_lock);
+
+       if (dd->hp_context && !--dd->hp_refcount) {
+               dd->hp_ops = NULL;
+               context = dd->hp_context;
+               dd->hp_context = NULL;
+               release = dd->hp_release;
+               dd->hp_release = NULL;
+       }
+
+       if (release && context)
+               release(context);
+
+       mutex_unlock(&hotplug_lock);
+}
+
+static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
+                              bool uevent)
+{
+       acpi_notify_handler cb = NULL;
+       bool run = false;
+
+       mutex_lock(&hotplug_lock);
+
+       if (dd->hp_context) {
+               run = true;
+               dd->hp_refcount++;
+               if (dd->hp_ops)
+                       cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler;
+       }
+
+       mutex_unlock(&hotplug_lock);
+
+       if (!run)
+               return;
+
+       if (cb)
+               cb(dd->handle, event, dd->hp_context);
+
+       dock_release_hotplug(dd);
 }
 
 /**
@@ -360,9 +415,8 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
        /*
         * First call driver specific hotplug functions
         */
-       list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
-               if (dd->ops && dd->ops->handler)
-                       dd->ops->handler(dd->handle, event, dd->context);
+       list_for_each_entry(dd, &ds->dependent_devices, list)
+               dock_hotplug_event(dd, event, false);
 
        /*
         * Now make sure that an acpi_device is created for each
@@ -398,9 +452,8 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
        if (num == DOCK_EVENT)
                kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
 
-       list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
-               if (dd->ops && dd->ops->uevent)
-                       dd->ops->uevent(dd->handle, event, dd->context);
+       list_for_each_entry(dd, &ds->dependent_devices, list)
+               dock_hotplug_event(dd, event, true);
 
        if (num != DOCK_EVENT)
                kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
@@ -570,19 +623,24 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
  * @handle: the handle of the device
  * @ops: handlers to call after docking
  * @context: device specific data
+ * @init: Optional initialization routine to run after registration
+ * @release: Optional release routine to run on unregistration
  *
  * If a driver would like to perform a hotplug operation after a dock
  * event, they can register an acpi_notify_handler to be called by
  * the dock driver after _DCK is executed.
  */
-int
-register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
-                            void *context)
+int register_hotplug_dock_device(acpi_handle handle,
+                                const struct acpi_dock_ops *ops, void *context,
+                                void (*init)(void *), void (*release)(void *))
 {
        struct dock_dependent_device *dd;
        struct dock_station *dock_station;
        int ret = -EINVAL;
 
+       if (WARN_ON(!context))
+               return -EINVAL;
+
        if (!dock_station_count)
                return -ENODEV;
 
@@ -597,12 +655,8 @@ register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops
                 * ops
                 */
                dd = find_dock_dependent_device(dock_station, handle);
-               if (dd) {
-                       dd->ops = ops;
-                       dd->context = context;
-                       dock_add_hotplug_device(dock_station, dd);
+               if (dd && !dock_init_hotplug(dd, ops, context, init, release))
                        ret = 0;
-               }
        }
 
        return ret;
@@ -624,7 +678,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
        list_for_each_entry(dock_station, &dock_stations, sibling) {
                dd = find_dock_dependent_device(dock_station, handle);
                if (dd)
-                       dock_del_hotplug_device(dock_station, dd);
+                       dock_release_hotplug(dd);
        }
 }
 EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
@@ -868,8 +922,10 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
        if (!count)
                return -EINVAL;
 
+       acpi_scan_lock_acquire();
        begin_undock(dock_station);
        ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
+       acpi_scan_lock_release();
        return ret ? ret: count;
 }
 static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
@@ -951,7 +1007,6 @@ static int __init dock_add(acpi_handle handle)
        mutex_init(&dock_station->hp_lock);
        spin_lock_init(&dock_station->dd_lock);
        INIT_LIST_HEAD(&dock_station->sibling);
-       INIT_LIST_HEAD(&dock_station->hotplug_devices);
        ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
        INIT_LIST_HEAD(&dock_station->dependent_devices);
 
@@ -991,30 +1046,6 @@ err_unregister:
        return ret;
 }
 
-/**
- * dock_remove - free up resources related to the dock station
- */
-static int dock_remove(struct dock_station *ds)
-{
-       struct dock_dependent_device *dd, *tmp;
-       struct platform_device *dock_device = ds->dock_device;
-
-       if (!dock_station_count)
-               return 0;
-
-       /* remove dependent devices */
-       list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
-               kfree(dd);
-
-       list_del(&ds->sibling);
-
-       /* cleanup sysfs */
-       sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
-       platform_device_unregister(dock_device);
-
-       return 0;
-}
-
 /**
  * find_dock_and_bay - look for dock stations and bays
  * @handle: acpi handle of a device
@@ -1033,7 +1064,7 @@ find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
        return AE_OK;
 }
 
-static int __init dock_init(void)
+int __init acpi_dock_init(void)
 {
        if (acpi_disabled)
                return 0;
@@ -1052,19 +1083,3 @@ static int __init dock_init(void)
                ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
        return 0;
 }
-
-static void __exit dock_exit(void)
-{
-       struct dock_station *tmp, *dock_station;
-
-       unregister_acpi_bus_notifier(&dock_acpi_notifier);
-       list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
-               dock_remove(dock_station);
-}
-
-/*
- * Must be called before drivers of devices in dock, otherwise we can't know
- * which devices are in a dock
- */
-subsys_initcall(dock_init);
-module_exit(dock_exit);
index 297cbf456f86bee35300ef6c72058615663f563f..c610a76d92c4b4bdf5deafae76252110460d5ed2 100644 (file)
@@ -40,6 +40,11 @@ void acpi_container_init(void);
 #else
 static inline void acpi_container_init(void) {}
 #endif
+#ifdef CONFIG_ACPI_DOCK
+void acpi_dock_init(void);
+#else
+static inline void acpi_dock_init(void) {}
+#endif
 #ifdef CONFIG_ACPI_HOTPLUG_MEMORY
 void acpi_memory_hotplug_init(void);
 #else
index f962047c6c85591762893ada0a73549913b03d85..288bb270f8edc11cb0d92e23f7b759619e35e660 100644 (file)
@@ -885,6 +885,7 @@ int acpi_add_power_resource(acpi_handle handle)
                                ACPI_STA_DEFAULT);
        mutex_init(&resource->resource_lock);
        INIT_LIST_HEAD(&resource->dependent);
+       INIT_LIST_HEAD(&resource->list_node);
        resource->name = device->pnp.bus_id;
        strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
index a3868f6c222abfc65541a5a599035e18145108f9..3322b47ab7cae22dc08520b6cec9a3b1df981b84 100644 (file)
@@ -304,7 +304,8 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
 }
 
 static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
-                                    u8 triggering, u8 polarity, u8 shareable)
+                                    u8 triggering, u8 polarity, u8 shareable,
+                                    bool legacy)
 {
        int irq, p, t;
 
@@ -317,14 +318,19 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
         * In IO-APIC mode, use the overridden attribute. Two reasons:
         * 1. BIOS bug in DSDT
         * 2. BIOS uses IO-APIC mode Interrupt Source Override
+        *
+        * We do this only if we are dealing with IRQ() or IRQNoFlags()
+        * resource (the legacy ISA resources). With modern ACPI 5 devices
+        * using extended IRQ descriptors we take the IRQ configuration
+        * from _CRS directly.
         */
-       if (!acpi_get_override_irq(gsi, &t, &p)) {
+       if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
                u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
                u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
 
                if (triggering != trig || polarity != pol) {
                        pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
-                                  t ? "edge" : "level", p ? "low" : "high");
+                                  t ? "level" : "edge", p ? "low" : "high");
                        triggering = trig;
                        polarity = pol;
                }
@@ -373,7 +379,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                }
                acpi_dev_get_irqresource(res, irq->interrupts[index],
                                         irq->triggering, irq->polarity,
-                                        irq->sharable);
+                                        irq->sharable, true);
                break;
        case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
                ext_irq = &ares->data.extended_irq;
@@ -383,7 +389,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                }
                acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
                                         ext_irq->triggering, ext_irq->polarity,
-                                        ext_irq->sharable);
+                                        ext_irq->sharable, false);
                break;
        default:
                return false;
index b14ac46948c9e444246eba50e367ed1a16399ea2..27da63061e11ae88c628242e0eed71fc0e1d7588 100644 (file)
@@ -2042,6 +2042,7 @@ int __init acpi_scan_init(void)
        acpi_lpss_init();
        acpi_container_init();
        acpi_memory_hotplug_init();
+       acpi_dock_init();
 
        mutex_lock(&acpi_scan_lock);
        /*
index 87f2f395d79a1f9a9cd3dbc81675f0d71ac6f2cd..cf4e7020adacde5e69881a21adb0c578d31d7a3e 100644 (file)
@@ -156,8 +156,10 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
 
        spin_unlock_irqrestore(ap->lock, flags);
 
-       if (wait)
+       if (wait) {
                ata_port_wait_eh(ap);
+               flush_work(&ap->hotplug_task.work);
+       }
 }
 
 static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
@@ -214,6 +216,39 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
        .uevent = ata_acpi_ap_uevent,
 };
 
+void ata_acpi_hotplug_init(struct ata_host *host)
+{
+       int i;
+
+       for (i = 0; i < host->n_ports; i++) {
+               struct ata_port *ap = host->ports[i];
+               acpi_handle handle;
+               struct ata_device *dev;
+
+               if (!ap)
+                       continue;
+
+               handle = ata_ap_acpi_handle(ap);
+               if (handle) {
+                       /* we might be on a docking station */
+                       register_hotplug_dock_device(handle,
+                                                    &ata_acpi_ap_dock_ops, ap,
+                                                    NULL, NULL);
+               }
+
+               ata_for_each_dev(dev, &ap->link, ALL) {
+                       handle = ata_dev_acpi_handle(dev);
+                       if (!handle)
+                               continue;
+
+                       /* we might be on a docking station */
+                       register_hotplug_dock_device(handle,
+                                                    &ata_acpi_dev_dock_ops,
+                                                    dev, NULL, NULL);
+               }
+       }
+}
+
 /**
  * ata_acpi_dissociate - dissociate ATA host from ACPI objects
  * @host: target ATA host
index f2184276539d885d167c2048a305847b51e58417..adf002a3c584b3d2bd7fb27357df040a381f3b82 100644 (file)
@@ -6148,6 +6148,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
        if (rc)
                goto err_tadd;
 
+       ata_acpi_hotplug_init(host);
+
        /* set cable, sata_spd_limit and report */
        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
index c949dd311b2ecd2e6d65e294409d24c4afea6eb7..577d902bc4deaa12d69d1acdc8aa304732163227 100644 (file)
@@ -122,6 +122,7 @@ extern int ata_acpi_register(void);
 extern void ata_acpi_unregister(void);
 extern void ata_acpi_bind(struct ata_device *dev);
 extern void ata_acpi_unbind(struct ata_device *dev);
+extern void ata_acpi_hotplug_init(struct ata_host *host);
 #else
 static inline void ata_acpi_dissociate(struct ata_host *host) { }
 static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -134,6 +135,7 @@ static inline int ata_acpi_register(void) { return 0; }
 static inline void ata_acpi_unregister(void) { }
 static inline void ata_acpi_bind(struct ata_device *dev) { }
 static inline void ata_acpi_unbind(struct ata_device *dev) { }
+static inline void ata_acpi_hotplug_init(struct ata_host *host) {}
 #endif
 
 /* libata-scsi.c */
index 4b1f9265887f1048ff5a64cc14fa3d04964b519b..01e21037d8feb5c04e6aedee608e9cdedc0d87bd 100644 (file)
@@ -450,8 +450,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 {
        struct firmware_buf *buf = fw_priv->buf;
 
+       /*
+        * There is a small window in which user can write to 'loading'
+        * between loading done and disappearance of 'loading'
+        */
+       if (test_bit(FW_STATUS_DONE, &buf->status))
+               return;
+
        set_bit(FW_STATUS_ABORT, &buf->status);
        complete_all(&buf->completion);
+
+       /* avoid user action after loading abort */
+       fw_priv->buf = NULL;
 }
 
 #define is_fw_load_aborted(buf)        \
@@ -528,7 +538,12 @@ static ssize_t firmware_loading_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
-       int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+       int loading = 0;
+
+       mutex_lock(&fw_lock);
+       if (fw_priv->buf)
+               loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+       mutex_unlock(&fw_lock);
 
        return sprintf(buf, "%d\n", loading);
 }
@@ -570,12 +585,12 @@ static ssize_t firmware_loading_store(struct device *dev,
                                      const char *buf, size_t count)
 {
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
-       struct firmware_buf *fw_buf = fw_priv->buf;
+       struct firmware_buf *fw_buf;
        int loading = simple_strtol(buf, NULL, 10);
        int i;
 
        mutex_lock(&fw_lock);
-
+       fw_buf = fw_priv->buf;
        if (!fw_buf)
                goto out;
 
@@ -777,10 +792,6 @@ static void firmware_class_timeout_work(struct work_struct *work)
                        struct firmware_priv, timeout_work.work);
 
        mutex_lock(&fw_lock);
-       if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
-               mutex_unlock(&fw_lock);
-               return;
-       }
        fw_load_abort(fw_priv);
        mutex_unlock(&fw_lock);
 }
@@ -861,8 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 
        cancel_delayed_work_sync(&fw_priv->timeout_work);
 
-       fw_priv->buf = NULL;
-
        device_remove_file(f_dev, &dev_attr_loading);
 err_del_bin_attr:
        device_remove_bin_file(f_dev, &firmware_attr_data);
index 8b6bb764b0a3f83edb759fdb2ac80559c3dfb511..99e773cb70d0b58d4a54115be3593986966def0f 100644 (file)
@@ -25,9 +25,9 @@
 #include <linux/string.h>
 #include <linux/crypto.h>
 #include <linux/blkdev.h>
-#include <linux/loop.h>
 #include <linux/scatterlist.h>
 #include <asm/uaccess.h>
+#include "loop.h"
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
index d92d50fd84b7d4ec59d5537eed5d9fd288005c51..40e715531aa65f0e63babd13757ad6dfb104d22a 100644 (file)
@@ -63,7 +63,6 @@
 #include <linux/init.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
-#include <linux/loop.h>
 #include <linux/compat.h>
 #include <linux/suspend.h>
 #include <linux/freezer.h>
@@ -76,6 +75,7 @@
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
 #include <linux/falloc.h>
+#include "loop.h"
 
 #include <asm/uaccess.h>
 
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
new file mode 100644 (file)
index 0000000..90df5d6
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * loop.h
+ *
+ * Written by Theodore Ts'o, 3/29/93.
+ *
+ * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
+ * permitted under the GNU General Public License.
+ */
+#ifndef _LINUX_LOOP_H
+#define _LINUX_LOOP_H
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <uapi/linux/loop.h>
+
+/* Possible states of device */
+enum {
+       Lo_unbound,
+       Lo_bound,
+       Lo_rundown,
+};
+
+struct loop_func_table;
+
+struct loop_device {
+       int             lo_number;
+       int             lo_refcnt;
+       loff_t          lo_offset;
+       loff_t          lo_sizelimit;
+       int             lo_flags;
+       int             (*transfer)(struct loop_device *, int cmd,
+                                   struct page *raw_page, unsigned raw_off,
+                                   struct page *loop_page, unsigned loop_off,
+                                   int size, sector_t real_block);
+       char            lo_file_name[LO_NAME_SIZE];
+       char            lo_crypt_name[LO_NAME_SIZE];
+       char            lo_encrypt_key[LO_KEY_SIZE];
+       int             lo_encrypt_key_size;
+       struct loop_func_table *lo_encryption;
+       __u32           lo_init[2];
+       kuid_t          lo_key_owner;   /* Who set the key */
+       int             (*ioctl)(struct loop_device *, int cmd, 
+                                unsigned long arg); 
+
+       struct file *   lo_backing_file;
+       struct block_device *lo_device;
+       unsigned        lo_blocksize;
+       void            *key_data; 
+
+       gfp_t           old_gfp_mask;
+
+       spinlock_t              lo_lock;
+       struct bio_list         lo_bio_list;
+       unsigned int            lo_bio_count;
+       int                     lo_state;
+       struct mutex            lo_ctl_mutex;
+       struct task_struct      *lo_thread;
+       wait_queue_head_t       lo_event;
+       /* wait queue for incoming requests */
+       wait_queue_head_t       lo_req_wait;
+
+       struct request_queue    *lo_queue;
+       struct gendisk          *lo_disk;
+};
+
+/* Support for loadable transfer modules */
+struct loop_func_table {
+       int number;     /* filter type */ 
+       int (*transfer)(struct loop_device *lo, int cmd,
+                       struct page *raw_page, unsigned raw_off,
+                       struct page *loop_page, unsigned loop_off,
+                       int size, sector_t real_block);
+       int (*init)(struct loop_device *, const struct loop_info64 *); 
+       /* release is called from loop_unregister_transfer or clr_fd */
+       int (*release)(struct loop_device *); 
+       int (*ioctl)(struct loop_device *, int cmd, unsigned long arg);
+       struct module *owner;
+}; 
+
+int loop_register_transfer(struct loop_func_table *funcs);
+int loop_unregister_transfer(int number); 
+
+#endif
index 3063452e55daf5dafd5b28bb536f81109212fc2e..aff789d6fccd35b7f0a0c3314e5c04951c6c774c 100644 (file)
@@ -1036,12 +1036,16 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
        char *name;
        u64 segment;
        int ret;
+       char *name_format;
 
        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
-       ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
+       name_format = "%s.%012llx";
+       if (rbd_dev->image_format == 2)
+               name_format = "%s.%016llx";
+       ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
                pr_err("error formatting segment name for #%llu (%d)\n",
@@ -2248,13 +2252,17 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
                                        obj_request->pages, length,
                                        offset & ~PAGE_MASK, false, false);
 
+               /*
+                * set obj_request->img_request before formatting
+                * the osd_request so that it gets the right snapc
+                */
+               rbd_img_obj_request_add(img_request, obj_request);
                if (write_request)
                        rbd_osd_req_format_write(obj_request);
                else
                        rbd_osd_req_format_read(obj_request);
 
                obj_request->img_offset = img_offset;
-               rbd_img_obj_request_add(img_request, obj_request);
 
                img_offset += length;
                resid -= length;
@@ -4239,6 +4247,10 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
 
        down_write(&rbd_dev->header_rwsem);
 
+       ret = rbd_dev_v2_image_size(rbd_dev);
+       if (ret)
+               goto out;
+
        if (first_time) {
                ret = rbd_dev_v2_header_onetime(rbd_dev);
                if (ret)
@@ -4272,10 +4284,6 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
                                        "is EXPERIMENTAL!");
        }
 
-       ret = rbd_dev_v2_image_size(rbd_dev);
-       if (ret)
-               goto out;
-
        if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
                if (rbd_dev->mapping.size != rbd_dev->header.image_size)
                        rbd_dev->mapping.size = rbd_dev->header.image_size;
index 3a4343b3bd6d0dfeb0ef98370d4e23f89b39f996..9a9f51875df5ef7826cea976b6afefa0889e160b 100644 (file)
@@ -498,6 +498,10 @@ static int btmrvl_service_main_thread(void *data)
                add_wait_queue(&thread->wait_q, &wait);
 
                set_current_state(TASK_INTERRUPTIBLE);
+               if (kthread_should_stop()) {
+                       BT_DBG("main_thread: break from main thread");
+                       break;
+               }
 
                if (adapter->wakeup_tries ||
                                ((!adapter->int_count) &&
@@ -513,11 +517,6 @@ static int btmrvl_service_main_thread(void *data)
 
                BT_DBG("main_thread woke up");
 
-               if (kthread_should_stop()) {
-                       BT_DBG("main_thread: break from main thread");
-                       break;
-               }
-
                spin_lock_irqsave(&priv->driver_lock, flags);
                if (adapter->int_count) {
                        adapter->int_count = 0;
index 3bb6fa3930beba0c07282f858be05e2c74e10b79..14219972c745ae6f31636e6e6bd0e89335f22c08 100644 (file)
@@ -15,18 +15,6 @@ config DEVKMEM
          kind of kernel debugging operations.
          When in doubt, say "N".
 
-config STALDRV
-       bool "Stallion multiport serial support"
-       depends on SERIAL_NONSTANDARD
-       help
-         Stallion cards give you many serial ports.  You would need something
-         like this to connect more than two modems to your Linux box, for
-         instance in order to become a dial-in server.  If you say Y here,
-         you will be asked for your specific card model in the next
-         questions.  Make sure to read <file:Documentation/serial/stallion.txt>
-         in this case.  If you have never heard about all this, it's safe to
-         say N.
-
 config SGI_SNSC
        bool "SGI Altix system controller communication support"
        depends on (IA64_SGI_SN2 || IA64_GENERIC)
index 934cfd18f72ded4a295192f608cc653214c03908..1144e8c7579dddbf5c0a0b0ad3363c7f5d79c57e 100644 (file)
@@ -1955,6 +1955,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
                        srcu_cleanup_notifier_head(&cn->notifier_head);
+                       list_del(&cn->node);
                        kfree(cn);
                }
 
index 5c97e75924a8a87b4aebfa5e7ddfde1f31231fff..22d7699e7cedc7509849e0062212f9df18b4b1b3 100644 (file)
@@ -155,7 +155,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = {
 
 /* list of all parent clock list */
 PNAME(mout_apll_p)     = { "fin_pll", "fout_apll", };
-PNAME(mout_cpu_p)      = { "mout_apll", "mout_mpll", };
+PNAME(mout_cpu_p)      = { "mout_apll", "sclk_mpll", };
 PNAME(mout_mpll_fout_p)        = { "fout_mplldiv2", "fout_mpll" };
 PNAME(mout_mpll_p)     = { "fin_pll", "mout_mpll_fout" };
 PNAME(mout_bpll_fout_p)        = { "fout_bplldiv2", "fout_bpll" };
@@ -208,10 +208,10 @@ struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
 };
 
 struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
-       MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
-       MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
+       MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
+       MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
        MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
-       MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
+       MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
        MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
        MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
        MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
@@ -378,7 +378,7 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
        GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
        GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
        GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
-       GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0),
+       GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
        GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
        GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
        GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
index 89135f6be116cc47e88267f671a472c7dff49718..362f12dcd94422fd7fb6c9da1c9b71684e649cef 100644 (file)
@@ -111,7 +111,8 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
                                unsigned long parent_rate)
 {
        struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
-       u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+       u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
+       s16 kdiv;
        u64 fvco = parent_rate;
 
        pll_con0 = __raw_readl(pll->con_reg);
@@ -119,7 +120,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
        mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
        pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
        sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
-       kdiv = pll_con1 & PLL36XX_KDIV_MASK;
+       kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK);
 
        fvco *= (mdiv << 16) + kdiv;
        do_div(fvco, (pdiv << sdiv));
index f9ec43fd1320065b1d2050bf799dad8d1a5a8028..080c3c5e33f67823c28b79ad1c0fa4707afeaaee 100644 (file)
@@ -369,7 +369,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
        clk_register_clkdev(clk, NULL, "60100000.serial");
 }
 #else
-static inline void spear320_clk_init(void) { }
+static inline void spear320_clk_init(void __iomem *soc_config_base) { }
 #endif
 
 void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
index c6921f538e28dd370cb156588ab0d5e77645ec07..ba99e3844106ec961c03c33cd637ad59b217e2b3 100644 (file)
@@ -1598,6 +1598,12 @@ static void __init tegra30_periph_clk_init(void)
        clk_register_clkdev(clk, "afi", "tegra-pcie");
        clks[afi] = clk;
 
+       /* pciex */
+       clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
+                                   74, &periph_u_regs, periph_clk_enb_refcnt);
+       clk_register_clkdev(clk, "pciex", "tegra-pcie");
+       clks[pciex] = clk;
+
        /* kfuse */
        clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
                                    TEGRA_PERIPH_ON_APB,
@@ -1716,11 +1722,6 @@ static void __init tegra30_fixed_clk_init(void)
                                1, 0, &cml_lock);
        clk_register_clkdev(clk, "cml1", NULL);
        clks[cml1] = clk;
-
-       /* pciex */
-       clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
-       clk_register_clkdev(clk, "pciex", NULL);
-       clks[pciex] = clk;
 }
 
 static void __init tegra30_osc_clk_init(void)
index 4b9bb5def6f159a126e4f05aa6a76d365c6a23f8..93eb5cbcc1f639bf1ec52479a169a49deecd72f5 100644 (file)
@@ -47,6 +47,8 @@ static struct od_ops od_ops;
 static struct cpufreq_governor cpufreq_gov_ondemand;
 #endif
 
+static unsigned int default_powersave_bias;
+
 static void ondemand_powersave_bias_init_cpu(int cpu)
 {
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -543,7 +545,7 @@ static int od_init(struct dbs_data *dbs_data)
 
        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
        tuners->ignore_nice = 0;
-       tuners->powersave_bias = 0;
+       tuners->powersave_bias = default_powersave_bias;
        tuners->io_is_busy = should_io_be_busy();
 
        dbs_data->tuners = tuners;
@@ -585,6 +587,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
        unsigned int cpu;
        cpumask_t done;
 
+       default_powersave_bias = powersave_bias;
        cpumask_clear(&done);
 
        get_online_cpus();
@@ -593,11 +596,17 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
                        continue;
 
                policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
-               dbs_data = policy->governor_data;
-               od_tuners = dbs_data->tuners;
-               od_tuners->powersave_bias = powersave_bias;
+               if (!policy)
+                       continue;
 
                cpumask_or(&done, &done, policy->cpus);
+
+               if (policy->governor != &cpufreq_gov_ondemand)
+                       continue;
+
+               dbs_data = policy->governor_data;
+               od_tuners = dbs_data->tuners;
+               od_tuners->powersave_bias = default_powersave_bias;
        }
        put_online_cpus();
 }
index d3f7d2db870f985253a603f3f53f2b5475ee97f6..4a430360af5a2d6932c15170a2149e7aee4b1457 100644 (file)
@@ -1094,6 +1094,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
        const struct omap_gpio_platform_data *pdata;
        struct resource *res;
        struct gpio_bank *bank;
+#ifdef CONFIG_ARCH_OMAP1
+       int irq_base;
+#endif
 
        match = of_match_device(of_match_ptr(omap_gpio_match), dev);
 
@@ -1135,11 +1138,28 @@ static int omap_gpio_probe(struct platform_device *pdev)
                                pdata->get_context_loss_count;
        }
 
+#ifdef CONFIG_ARCH_OMAP1
+       /*
+        * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
+        * irq_alloc_descs() and irq_domain_add_legacy() and just use a
+        * linear IRQ domain mapping for all OMAP platforms.
+        */
+       irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
+       if (irq_base < 0) {
+               dev_err(dev, "Couldn't allocate IRQ numbers\n");
+               return -ENODEV;
+       }
 
+       bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
+                                            0, &irq_domain_simple_ops, NULL);
+#else
        bank->domain = irq_domain_add_linear(node, bank->width,
                                             &irq_domain_simple_ops, NULL);
-       if (!bank->domain)
+#endif
+       if (!bank->domain) {
+               dev_err(dev, "Couldn't register an IRQ domain\n");
                return -ENODEV;
+       }
 
        if (bank->regs->set_dataout && bank->regs->clr_dataout)
                bank->set_dataout = _set_gpio_dataout_reg;
index dcde35231e259c83460b9dfb8bb9b4a1514ca3a5..5b7b9110254b1669d046c85d2429a6d93cb2a36c 100644 (file)
@@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                if (ret)
                        return ERR_PTR(ret);
        }
-       return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
-                             0600);
+       return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
index 1d4f7c9fe661896ba02be00083495e3a221a2214..67969e25d60f2c218a16ca28d3c76408109cb116 100644 (file)
@@ -617,7 +617,6 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
-               vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
index 004ecdfe1b556769327e08cfc0e81280ab94568f..ada49eda489fe661bbc202f4b43def6ec72e303c 100644 (file)
@@ -97,7 +97,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
        buf = dev_priv->mmap_buffer;
        buf_priv = buf->dev_private;
 
-       vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+       vma->vm_flags |= VM_DONTCOPY;
 
        buf_priv->currently_mapped = I810_BUF_MAPPED;
 
index b9d00dcf9a2d4fcb618c67af82df56ee640f72e2..9669a0b8b440384394f1d30cc2890add8525a8f7 100644 (file)
@@ -1697,6 +1697,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                struct drm_gem_object *gem_obj, int flags);
 
+void i915_gem_restore_fences(struct drm_device *dev);
+
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
index 970ad17c99ab1092522999a4382737b5dbffbafe..9e35dafc580724da0f48db14c441f57db951b45d 100644 (file)
@@ -1801,7 +1801,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                        gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
                        gfp &= ~(__GFP_IO | __GFP_WAIT);
                }
-
+#ifdef CONFIG_SWIOTLB
+               if (swiotlb_nr_tbl()) {
+                       st->nents++;
+                       sg_set_page(sg, page, PAGE_SIZE, 0);
+                       sg = sg_next(sg);
+                       continue;
+               }
+#endif
                if (!i || page_to_pfn(page) != last_pfn + 1) {
                        if (i)
                                sg = sg_next(sg);
@@ -1812,8 +1819,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                }
                last_pfn = page_to_pfn(page);
        }
-
-       sg_mark_end(sg);
+#ifdef CONFIG_SWIOTLB
+       if (!swiotlb_nr_tbl())
+#endif
+               sg_mark_end(sg);
        obj->pages = st;
 
        if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2117,25 +2126,15 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
        }
 }
 
-static void i915_gem_reset_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
-               if (reg->obj)
-                       i915_gem_object_fence_lost(reg->obj);
-
-               i915_gem_write_fence(dev, i, NULL);
-
-               reg->pin_count = 0;
-               reg->obj = NULL;
-               INIT_LIST_HEAD(&reg->lru_list);
+               i915_gem_write_fence(dev, i, reg->obj);
        }
-
-       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2158,8 +2157,7 @@ void i915_gem_reset(struct drm_device *dev)
                obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
        }
 
-       /* The fence registers are invalidated so clear them out */
-       i915_gem_reset_fences(dev);
+       i915_gem_restore_fences(dev);
 }
 
 /**
@@ -3865,8 +3863,6 @@ i915_gem_idle(struct drm_device *dev)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);
 
-       i915_gem_reset_fences(dev);
-
        /* Hack!  Don't let anybody do execbuf while we don't control the chip.
         * We need to replace this with a semaphore, or something.
         * And not confound mm.suspended!
@@ -4193,7 +4189,8 @@ i915_gem_load(struct drm_device *dev)
                dev_priv->num_fence_regs = 8;
 
        /* Initialize fence registers to zero */
-       i915_gem_reset_fences(dev);
+       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       i915_gem_restore_fences(dev);
 
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
index 41f0fdecfbdc27c27f3132d39130e14ba19fd999..369b3d8776ab42c6ef52dafa94174c3038ecebe0 100644 (file)
@@ -384,6 +384,7 @@ int i915_restore_state(struct drm_device *dev)
 
        mutex_lock(&dev->struct_mutex);
 
+       i915_gem_restore_fences(dev);
        i915_restore_display(dev);
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
index a4b71b25fa5366c948f8447e6c7b6f0808b4aab4..a30f29425c216afb5f4983a9be32210355c8e37d 100644 (file)
@@ -171,6 +171,11 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
                if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
                        return -EINVAL;
 
+               if (!access_ok(VERIFY_READ,
+                              (void *)(unsigned long)user_cmd.command,
+                              user_cmd.command_size))
+                       return -EFAULT;
+
                ret = qxl_alloc_release_reserved(qdev,
                                                 sizeof(union qxl_release_info) +
                                                 user_cmd.command_size,
index 0e5341695922b504298b103df17c936e95a34d0a..6948eb88c2b7848cad2ff436e0df835bdd8d7999 100644 (file)
@@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev)
 int r600_uvd_init(struct radeon_device *rdev)
 {
        int i, j, r;
+       /* disable byte swapping */
+       u32 lmi_swap_cntl = 0;
+       u32 mp_swap_cntl = 0;
 
        /* raise clocks while booting up the VCPU */
        radeon_set_uvd_clocks(rdev, 53300, 40000);
@@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev)
        WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                             (1 << 21) | (1 << 9) | (1 << 20));
 
-       /* disable byte swapping */
-       WREG32(UVD_LMI_SWAP_CNTL, 0);
-       WREG32(UVD_MP_SWAP_CNTL, 0);
+#ifdef __BIG_ENDIAN
+       /* swap (8 in 32) RB and IB */
+       lmi_swap_cntl = 0xa;
+       mp_swap_cntl = 0;
+#endif
+       WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+       WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
 
        WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(UVD_MPC_SET_MUXA1, 0x0);
index 189973836cff691ced976818f1bf045ed58e424b..b0dc0b6cb4e0f83603c4f56255205880b197c4d6 100644 (file)
@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
  */
 void radeon_wb_disable(struct radeon_device *rdev)
 {
-       int r;
-
-       if (rdev->wb.wb_obj) {
-               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-               if (unlikely(r != 0))
-                       return;
-               radeon_bo_kunmap(rdev->wb.wb_obj);
-               radeon_bo_unpin(rdev->wb.wb_obj);
-               radeon_bo_unreserve(rdev->wb.wb_obj);
-       }
        rdev->wb.enabled = false;
 }
 
@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
 {
        radeon_wb_disable(rdev);
        if (rdev->wb.wb_obj) {
+               if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+                       radeon_bo_kunmap(rdev->wb.wb_obj);
+                       radeon_bo_unpin(rdev->wb.wb_obj);
+                       radeon_bo_unreserve(rdev->wb.wb_obj);
+               }
                radeon_bo_unref(&rdev->wb.wb_obj);
                rdev->wb.wb = NULL;
                rdev->wb.wb_obj = NULL;
@@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev)
                        dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }
-       }
-       r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-       if (unlikely(r != 0)) {
-               radeon_wb_fini(rdev);
-               return r;
-       }
-       r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-                         &rdev->wb.gpu_addr);
-       if (r) {
+               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+               if (unlikely(r != 0)) {
+                       radeon_wb_fini(rdev);
+                       return r;
+               }
+               r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+                               &rdev->wb.gpu_addr);
+               if (r) {
+                       radeon_bo_unreserve(rdev->wb.wb_obj);
+                       dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+                       radeon_wb_fini(rdev);
+                       return r;
+               }
+               r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
                radeon_bo_unreserve(rdev->wb.wb_obj);
-               dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-               radeon_wb_fini(rdev);
-               return r;
-       }
-       r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-       radeon_bo_unreserve(rdev->wb.wb_obj);
-       if (r) {
-               dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-               radeon_wb_fini(rdev);
-               return r;
+               if (r) {
+                       dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+                       radeon_wb_fini(rdev);
+                       return r;
+               }
        }
 
        /* clear wb memory */
index 5b937dfe6f65b8fc1c89f0370afbeb1100a61fdf..ddb8f8e04eb549f4fd264b2f704cdd407bcc65e0 100644 (file)
@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-               *drv->cpu_addr = cpu_to_le32(seq);
+               if (drv->cpu_addr) {
+                       *drv->cpu_addr = cpu_to_le32(seq);
+               }
        } else {
                WREG32(drv->scratch_reg, seq);
        }
@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
        u32 seq = 0;
 
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-               seq = le32_to_cpu(*drv->cpu_addr);
+               if (drv->cpu_addr) {
+                       seq = le32_to_cpu(*drv->cpu_addr);
+               } else {
+                       seq = lower_32_bits(atomic64_read(&drv->last_seq));
+               }
        } else {
                seq = RREG32(drv->scratch_reg);
        }
index 2c1341f63dc5afa19e94bc4aac51e5986b4b3cdf..43ec4a401f077809371e0360f22aa7f10c21af3b 100644 (file)
@@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
                     struct radeon_bo_va *bo_va)
 {
-       int r;
+       int r = 0;
 
        mutex_lock(&rdev->vm_manager.lock);
        mutex_lock(&bo_va->vm->mutex);
-       r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+       if (bo_va->soffset) {
+               r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+       }
        mutex_unlock(&rdev->vm_manager.lock);
        list_del(&bo_va->vm_list);
        mutex_unlock(&bo_va->vm->mutex);
index e17faa7cf732ee436e66f99fc0e3ac69c6798eab..82434018cbe81c3cd989de7f6586f88df57ddce1 100644 (file)
@@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
                return -ENOMEM;
        /* Align requested size with padding so unlock_commit can
         * pad safely */
+       radeon_ring_free_size(rdev, ring);
+       if (ring->ring_free_dw == (ring->ring_size / 4)) {
+               /* The ring is empty; update the lockup info to avoid
+                * a false positive.
+                */
+               radeon_ring_lockup_update(ring);
+       }
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
        while (ndw > (ring->ring_free_dw - 1)) {
                radeon_ring_free_size(rdev, ring);
index 906e5c0ca3b9a3e7f99ccc627674d4c3cbf3e260..cad735dd02c6f95260a92b268a446d1fbebe3be5 100644 (file)
@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
        if (!r) {
                radeon_bo_kunmap(rdev->uvd.vcpu_bo);
                radeon_bo_unpin(rdev->uvd.vcpu_bo);
+               rdev->uvd.cpu_addr = NULL;
+               if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
+                       radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+               }
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+               if (rdev->uvd.cpu_addr) {
+                       radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+               } else {
+                       rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
+               }
        }
        return r;
 }
@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
                return r;
        }
 
+       /* The buffer was pinned to the CPU domain and mapped on suspend; unmap and unpin it first */
+       radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+       radeon_bo_unpin(rdev->uvd.vcpu_bo);
+
        r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->uvd.gpu_addr);
        if (r) {
@@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
        }
 
        /* stitch together an UVD create msg */
-       msg[0] = 0x00000de4;
-       msg[1] = 0x00000000;
-       msg[2] = handle;
-       msg[3] = 0x00000000;
-       msg[4] = 0x00000000;
-       msg[5] = 0x00000000;
-       msg[6] = 0x00000000;
-       msg[7] = 0x00000780;
-       msg[8] = 0x00000440;
-       msg[9] = 0x00000000;
-       msg[10] = 0x01b37000;
+       msg[0] = cpu_to_le32(0x00000de4);
+       msg[1] = cpu_to_le32(0x00000000);
+       msg[2] = cpu_to_le32(handle);
+       msg[3] = cpu_to_le32(0x00000000);
+       msg[4] = cpu_to_le32(0x00000000);
+       msg[5] = cpu_to_le32(0x00000000);
+       msg[6] = cpu_to_le32(0x00000000);
+       msg[7] = cpu_to_le32(0x00000780);
+       msg[8] = cpu_to_le32(0x00000440);
+       msg[9] = cpu_to_le32(0x00000000);
+       msg[10] = cpu_to_le32(0x01b37000);
        for (i = 11; i < 1024; ++i)
-               msg[i] = 0x0;
+               msg[i] = cpu_to_le32(0x0);
 
        radeon_bo_kunmap(bo);
        radeon_bo_unreserve(bo);
@@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
        }
 
        /* stitch together an UVD destroy msg */
-       msg[0] = 0x00000de4;
-       msg[1] = 0x00000002;
-       msg[2] = handle;
-       msg[3] = 0x00000000;
+       msg[0] = cpu_to_le32(0x00000de4);
+       msg[1] = cpu_to_le32(0x00000002);
+       msg[2] = cpu_to_le32(handle);
+       msg[3] = cpu_to_le32(0x00000000);
        for (i = 4; i < 1024; ++i)
-               msg[i] = 0x0;
+               msg[i] = cpu_to_le32(0x0);
 
        radeon_bo_kunmap(bo);
        radeon_bo_unreserve(bo);
index d6cbfe9df21810efe9255415787af029525d1131..fa061d46527f91e7de8838930a75d863ac7d3f6a 100644 (file)
@@ -137,7 +137,7 @@ static const struct xpad_device {
        { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
        { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
-       { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
+       { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
index 62a2c0e4cc998774e1c12185b4f04369bbde4d28..7ac9c9818d5562c5d3970ec8e59f4df63ce2aba7 100644 (file)
@@ -431,6 +431,7 @@ config KEYBOARD_TEGRA
 
 config KEYBOARD_OPENCORES
        tristate "OpenCores Keyboard Controller"
+       depends on HAS_IOMEM
        help
          Say Y here if you want to use the OpenCores Keyboard Controller
          http://www.opencores.org/project,keyboardcontroller
index aebfe3ecb9451638b21714949caff6c9c381fbf8..1bda828f4b5546cda5820cec874e66277a01a6f6 100644 (file)
@@ -205,6 +205,7 @@ config SERIO_XILINX_XPS_PS2
 
 config SERIO_ALTERA_PS2
        tristate "Altera UP PS/2 controller"
+       depends on HAS_IOMEM
        help
          Say Y here if you have Altera University Program PS/2 ports.
 
index 518282da6d850b180d56572c0517b62424f12e19..384fbcd0cee0d1b65d402beaa0f4f418e22e0a50 100644 (file)
@@ -363,6 +363,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
                case 0x160802: /* Cintiq 13HD Pro Pen */
                case 0x180802: /* DTH2242 Pen */
+               case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
                        wacom->tool[idx] = BTN_TOOL_PEN;
                        break;
 
@@ -401,6 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
                case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
                case 0x18080a: /* DTH2242 Eraser */
+               case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
                        wacom->tool[idx] = BTN_TOOL_RUBBER;
                        break;
 
index 8e60437ac85b46569e57ae78f2bea455fd1b9ab5..ae89d2609ab0bd1a7e943cef3550ac0b992e18df 100644 (file)
@@ -116,6 +116,15 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
        return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
 }
 
+static int cyttsp_handshake(struct cyttsp *ts)
+{
+       if (ts->pdata->use_hndshk)
+               return ttsp_send_command(ts,
+                               ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
+
+       return 0;
+}
+
 static int cyttsp_load_bl_regs(struct cyttsp *ts)
 {
        memset(&ts->bl_data, 0, sizeof(ts->bl_data));
@@ -133,7 +142,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
        memcpy(bl_cmd, bl_command, sizeof(bl_command));
        if (ts->pdata->bl_keys)
                memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
-                       ts->pdata->bl_keys, sizeof(bl_command));
+                       ts->pdata->bl_keys, CY_NUM_BL_KEYS);
 
        error = ttsp_write_block_data(ts, CY_REG_BASE,
                                      sizeof(bl_cmd), bl_cmd);
@@ -167,6 +176,10 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts)
        if (error)
                return error;
 
+       error = cyttsp_handshake(ts);
+       if (error)
+               return error;
+
        return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
 }
 
@@ -188,6 +201,10 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
        if (error)
                return error;
 
+       error = cyttsp_handshake(ts);
+       if (error)
+               return error;
+
        if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
                return -EIO;
 
@@ -344,12 +361,9 @@ static irqreturn_t cyttsp_irq(int irq, void *handle)
                goto out;
 
        /* provide flow control handshake */
-       if (ts->pdata->use_hndshk) {
-               error = ttsp_send_command(ts,
-                               ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
-               if (error)
-                       goto out;
-       }
+       error = cyttsp_handshake(ts);
+       if (error)
+               goto out;
 
        if (unlikely(ts->state == CY_IDLE_STATE))
                goto out;
index 1aa3c6967e70fb63610849f578f3afe08d0ac145..f1ebde369f8696d84ed977b28269841009bc54b1 100644 (file)
@@ -67,8 +67,8 @@ struct cyttsp_xydata {
 /* TTSP System Information interface definition */
 struct cyttsp_sysinfo_data {
        u8 hst_mode;
-       u8 mfg_cmd;
        u8 mfg_stat;
+       u8 mfg_cmd;
        u8 cid[3];
        u8 tt_undef1;
        u8 uid[8];
index 1760ceb68b7b61cf522e601c48d2f23aa11e6b21..19ceaa60e0f45c755fd67d840e8ee90de320d1ca 100644 (file)
@@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
 static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
-       if (action == CPU_STARTING)
+       if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                gic_cpu_init(&gic_data[0]);
        return NOTIFY_OK;
 }
index 7f5a7cac6dc7e3982aff30754bafd8c388976a5b..8270388e2a0d2b703f2d08e2cc95533ff434dc14 100644 (file)
@@ -136,9 +136,9 @@ config DVB_NET
 
 # This Kconfig option is used by both PCI and USB drivers
 config TTPCI_EEPROM
-        tristate
-        depends on I2C
-        default n
+       tristate
+       depends on I2C
+       default n
 
 source "drivers/media/dvb-core/Kconfig"
 
@@ -189,6 +189,12 @@ config MEDIA_SUBDRV_AUTOSELECT
 
          If unsure say Y.
 
+config MEDIA_ATTACH
+       bool
+       depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
+       depends on MODULES
+       default MODULES
+
 source "drivers/media/i2c/Kconfig"
 source "drivers/media/tuners/Kconfig"
 source "drivers/media/dvb-frontends/Kconfig"
index cb52438e53ac46c59548fef332a0bd50902a18f6..9eac5310942fd97b503290a1a2f38ef09cb01479 100644 (file)
@@ -956,7 +956,7 @@ static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
 
        if (fie->pad != OIF_SOURCE_PAD)
                return -EINVAL;
-       if (fie->index > ARRAY_SIZE(s5c73m3_intervals))
+       if (fie->index >= ARRAY_SIZE(s5c73m3_intervals))
                return -EINVAL;
 
        mutex_lock(&state->lock);
index 27d62623274bbd967fa56559f3ed617a0557c6ba..aba5b1c649e693d3b205e12c2247f2c0f8a3b730 100644 (file)
@@ -615,7 +615,7 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
        int changed = 0;
        u32 old;
 
-       if (core->board.audio_chip == V4L2_IDENT_WM8775)
+       if (core->sd_wm8775)
                snd_cx88_wm8775_volume_put(kcontrol, value);
 
        left = value->value.integer.value[0] & 0x3f;
@@ -682,8 +682,7 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
                vol ^= bit;
                cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
                /* Pass mute onto any WM8775 */
-               if ((core->board.audio_chip == V4L2_IDENT_WM8775) &&
-                   ((1<<6) == bit))
+               if (core->sd_wm8775 && ((1<<6) == bit))
                        wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
                ret = 1;
        }
@@ -903,7 +902,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
                goto error;
 
        /* If there's a wm8775 then add a Line-In ALC switch */
-       if (core->board.audio_chip == V4L2_IDENT_WM8775)
+       if (core->sd_wm8775)
                snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
 
        strcpy (card->driver, "CX88x");
index 1b00615fd3953a44966a07656180f4351dc7e43c..c7a9be1065c0b2d907a23dba31d5861201a96dc3 100644 (file)
@@ -385,8 +385,7 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
                /* The wm8775 module has the "2" route hardwired into
                   the initialization. Some boards may use different
                   routes for different inputs. HVR-1300 surely does */
-               if (core->board.audio_chip &&
-                   core->board.audio_chip == V4L2_IDENT_WM8775) {
+               if (core->sd_wm8775) {
                        call_all(core, audio, s_routing,
                                 INPUT(input).audioroute, 0, 0);
                }
@@ -771,8 +770,7 @@ static int video_open(struct file *file)
                cx_write(MO_GP1_IO, core->board.radio.gpio1);
                cx_write(MO_GP2_IO, core->board.radio.gpio2);
                if (core->board.radio.audioroute) {
-                       if(core->board.audio_chip &&
-                               core->board.audio_chip == V4L2_IDENT_WM8775) {
+                       if (core->sd_wm8775) {
                                call_all(core, audio, s_routing,
                                        core->board.radio.audioroute, 0, 0);
                        }
@@ -959,7 +957,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
        u32 value,mask;
 
        /* Pass changes onto any WM8775 */
-       if (core->board.audio_chip == V4L2_IDENT_WM8775) {
+       if (core->sd_wm8775) {
                switch (ctrl->id) {
                case V4L2_CID_AUDIO_MUTE:
                        wm8775_s_ctrl(core, ctrl->id, ctrl->val);
index 48b8d7af386d5538bdc8b28219dd168cd540fcbd..9d1481a60bd97d5ce60e8387365a7e22cbc50016 100644 (file)
@@ -576,6 +576,14 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
        return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
 }
 
+static int vidioc_create_bufs(struct file *file, void *priv,
+                             struct v4l2_create_buffers *create)
+{
+       struct coda_ctx *ctx = fh_to_ctx(priv);
+
+       return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
+}
+
 static int vidioc_streamon(struct file *file, void *priv,
                           enum v4l2_buf_type type)
 {
@@ -610,6 +618,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
 
        .vidioc_qbuf            = vidioc_qbuf,
        .vidioc_dqbuf           = vidioc_dqbuf,
+       .vidioc_create_bufs     = vidioc_create_bufs,
 
        .vidioc_streamon        = vidioc_streamon,
        .vidioc_streamoff       = vidioc_streamoff,
index 1802f11e939f22a1ff9ca56fc42c5ff91cfffc8f..d0b375cf565fc9c384a5e7e93b16864b7c48d8ec 100644 (file)
@@ -916,6 +916,21 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
        other video window */
 
        layer->pix_fmt = *pixfmt;
+       if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
+               struct vpbe_layer *otherlayer;
+
+               otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
+               /* if the other layer is available, only
+                * claim it; do not configure it
+                */
+               ret = osd_device->ops.request_layer(osd_device,
+                                                   otherlayer->layer_info.id);
+               if (ret < 0) {
+                       v4l2_err(&vpbe_dev->v4l2_dev,
+                                "Display Manager failed to allocate layer\n");
+                       return -EBUSY;
+               }
+       }
 
        /* Get osd layer config */
        osd_device->ops.get_layer_config(osd_device,
index 8c50d3074866218c829e990ca376ab0744dc1a17..93609091cb237d837b513ed8661a40cbce0e17bc 100644 (file)
@@ -1837,7 +1837,7 @@ static int vpfe_probe(struct platform_device *pdev)
        if (NULL == ccdc_cfg) {
                v4l2_err(pdev->dev.driver,
                         "Memory allocation failed for ccdc_cfg\n");
-               goto probe_free_lock;
+               goto probe_free_dev_mem;
        }
 
        mutex_lock(&ccdc_lock);
@@ -1991,7 +1991,6 @@ probe_out_release_irq:
        free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
 probe_free_ccdc_cfg_mem:
        kfree(ccdc_cfg);
-probe_free_lock:
        mutex_unlock(&ccdc_lock);
 probe_free_dev_mem:
        kfree(vpfe_dev);
index b0ff67bc1b056647ecfcc7b3acc5812a9b0408ab..d05eaa2c84909cd5b78430030b2baeef005335c9 100644 (file)
@@ -174,7 +174,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is)
                HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
        };
 
-       if (WARN_ON(is->config_index > ARRAY_SIZE(cmd)))
+       if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
                return -EINVAL;
 
        mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
index 47c6363d04e2ce5e6ef5d631e26223daf81c41ef..0741945b79ed08829d0fe7490771a9f725f6063c 100644 (file)
@@ -48,7 +48,6 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = {
        [ISS_CLK_LITE0]                 = "lite0",
        [ISS_CLK_LITE1]                 = "lite1",
        [ISS_CLK_MPLL]                  = "mpll",
-       [ISS_CLK_SYSREG]                = "sysreg",
        [ISS_CLK_ISP]                   = "isp",
        [ISS_CLK_DRC]                   = "drc",
        [ISS_CLK_FD]                    = "fd",
@@ -71,7 +70,6 @@ static void fimc_is_put_clocks(struct fimc_is *is)
        for (i = 0; i < ISS_CLKS_MAX; i++) {
                if (IS_ERR(is->clocks[i]))
                        continue;
-               clk_unprepare(is->clocks[i]);
                clk_put(is->clocks[i]);
                is->clocks[i] = ERR_PTR(-EINVAL);
        }
@@ -90,12 +88,6 @@ static int fimc_is_get_clocks(struct fimc_is *is)
                        ret = PTR_ERR(is->clocks[i]);
                        goto err;
                }
-               ret = clk_prepare(is->clocks[i]);
-               if (ret < 0) {
-                       clk_put(is->clocks[i]);
-                       is->clocks[i] = ERR_PTR(-EINVAL);
-                       goto err;
-               }
        }
 
        return 0;
@@ -103,7 +95,7 @@ err:
        fimc_is_put_clocks(is);
        dev_err(&is->pdev->dev, "failed to get clock: %s\n",
                fimc_is_clocks[i]);
-       return -ENXIO;
+       return ret;
 }
 
 static int fimc_is_setup_clocks(struct fimc_is *is)
@@ -144,7 +136,7 @@ int fimc_is_enable_clocks(struct fimc_is *is)
        for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
                if (IS_ERR(is->clocks[i]))
                        continue;
-               ret = clk_enable(is->clocks[i]);
+               ret = clk_prepare_enable(is->clocks[i]);
                if (ret < 0) {
                        dev_err(&is->pdev->dev, "clock %s enable failed\n",
                                fimc_is_clocks[i]);
@@ -163,7 +155,7 @@ void fimc_is_disable_clocks(struct fimc_is *is)
 
        for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
                if (!IS_ERR(is->clocks[i])) {
-                       clk_disable(is->clocks[i]);
+                       clk_disable_unprepare(is->clocks[i]);
                        pr_debug("disabled clock: %s\n", fimc_is_clocks[i]);
                }
        }
@@ -326,6 +318,11 @@ int fimc_is_start_firmware(struct fimc_is *is)
        struct device *dev = &is->pdev->dev;
        int ret;
 
+       if (is->fw.f_w == NULL) {
+               dev_err(dev, "firmware is not loaded\n");
+               return -EINVAL;
+       }
+
        memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
        wmb();
 
@@ -837,23 +834,11 @@ static int fimc_is_probe(struct platform_device *pdev)
                goto err_clk;
        }
        pm_runtime_enable(dev);
-       /*
-        * Enable only the ISP power domain, keep FIMC-IS clocks off until
-        * the whole clock tree is configured. The ISP power domain needs
-        * be active in order to acces any CMU_ISP clock registers.
-        */
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0)
-               goto err_irq;
-
-       ret = fimc_is_setup_clocks(is);
-       pm_runtime_put_sync(dev);
 
+       ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                goto err_irq;
 
-       is->clk_init = true;
-
        is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
        if (IS_ERR(is->alloc_ctx)) {
                ret = PTR_ERR(is->alloc_ctx);
@@ -875,6 +860,8 @@ static int fimc_is_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_dfs;
 
+       pm_runtime_put_sync(dev);
+
        dev_dbg(dev, "FIMC-IS registered successfully\n");
        return 0;
 
@@ -894,9 +881,11 @@ err_clk:
 static int fimc_is_runtime_resume(struct device *dev)
 {
        struct fimc_is *is = dev_get_drvdata(dev);
+       int ret;
 
-       if (!is->clk_init)
-               return 0;
+       ret = fimc_is_setup_clocks(is);
+       if (ret)
+               return ret;
 
        return fimc_is_enable_clocks(is);
 }
@@ -905,9 +894,7 @@ static int fimc_is_runtime_suspend(struct device *dev)
 {
        struct fimc_is *is = dev_get_drvdata(dev);
 
-       if (is->clk_init)
-               fimc_is_disable_clocks(is);
-
+       fimc_is_disable_clocks(is);
        return 0;
 }
 
@@ -941,7 +928,8 @@ static int fimc_is_remove(struct platform_device *pdev)
        vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
        fimc_is_put_clocks(is);
        fimc_is_debugfs_remove(is);
-       release_firmware(is->fw.f_w);
+       if (is->fw.f_w)
+               release_firmware(is->fw.f_w);
        fimc_is_free_cpu_memory(is);
 
        return 0;
index f5275a5b0156693cc4f04f7ed4f757fa32839243..d7db133b493f7068e9f5281af8a2c95b9c21bf50 100644 (file)
@@ -73,7 +73,6 @@ enum {
        ISS_CLK_LITE0,
        ISS_CLK_LITE1,
        ISS_CLK_MPLL,
-       ISS_CLK_SYSREG,
        ISS_CLK_ISP,
        ISS_CLK_DRC,
        ISS_CLK_FD,
@@ -265,7 +264,6 @@ struct fimc_is {
        spinlock_t                      slock;
 
        struct clk                      *clocks[ISS_CLKS_MAX];
-       bool                            clk_init;
        void __iomem                    *regs;
        void __iomem                    *pmu_regs;
        int                             irq;
index d63947f7b30205e69c0c09e4bd05cfc3a7817019..7ede30b5910fab78b8c70bf022c565cbc5ab6021 100644 (file)
@@ -138,7 +138,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
                return 0;
        }
 
-       mf->colorspace = V4L2_COLORSPACE_JPEG;
+       mf->colorspace = V4L2_COLORSPACE_SRGB;
 
        mutex_lock(&isp->subdev_lock);
        __is_get_frame_size(is, &cur_fmt);
@@ -194,7 +194,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
        v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n",
                 __func__, fmt->pad, mf->code, mf->width, mf->height);
 
-       mf->colorspace = V4L2_COLORSPACE_JPEG;
+       mf->colorspace = V4L2_COLORSPACE_SRGB;
 
        mutex_lock(&isp->subdev_lock);
        __isp_subdev_try_format(isp, fmt);
index a2eda9d5ac87c73d38f1dab3c6c5b6541e3ca6ec..254d70fe762ab3b24367727afa62715229fb9e66 100644 (file)
@@ -746,7 +746,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
        node = v4l2_of_get_next_endpoint(node, NULL);
        if (!node) {
                dev_err(&pdev->dev, "No port node at %s\n",
-                                       node->full_name);
+                               pdev->dev.of_node->full_name);
                return -EINVAL;
        }
        /* Get port node and validate MIPI-CSI channel id. */
index 261134baa655471e4adc55667a68b979e56ddcaa..35d2fcdc0036b39914eaabb80d70433f6f438bcb 100644 (file)
@@ -229,7 +229,7 @@ struct camif_vp {
        unsigned int            state;
        u16                     fmt_flags;
        u8                      id;
-       u                     rotation;
+       u16                     rotation;
        u8                      hflip;
        u8                      vflip;
        unsigned int            offset;
index ddc2900d88a29da54c772afc0122f15a46da325e..d18cb5edd2d53975cea549bd4c696d618fb88c55 100644 (file)
@@ -1,2 +1,2 @@
 s5p-jpeg-objs := jpeg-core.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
index 379008c6d09adfca74f45afda0109b2bb3640768..15f59b324fefc396c9de24f5ae8be2694303ea7a 100644 (file)
@@ -1,4 +1,4 @@
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o
 s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
 s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
 s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
index 01f9ae0dadb0e1564d873d49694806d3283fc2e7..d12faa691af8fbb7f3e41f7c6f9516140067d565 100644 (file)
@@ -397,7 +397,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
 leave_handle_frame:
        spin_unlock_irqrestore(&dev->irqlock, flags);
        if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
-                                   || ctx->dst_queue_cnt < ctx->dpb_count)
+                                   || ctx->dst_queue_cnt < ctx->pb_count)
                clear_work_bit(ctx);
        s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
        wake_up_ctx(ctx, reason, err);
@@ -473,7 +473,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
 
                s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
 
-               ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
+               ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
                                dev);
                ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
                                dev);
@@ -562,7 +562,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
        struct s5p_mfc_dev *dev = ctx->dev;
        struct s5p_mfc_buf *mb_entry;
 
-       mfc_debug(2, "Stream completed");
+       mfc_debug(2, "Stream completed\n");
 
        s5p_mfc_clear_int_flags(dev);
        ctx->int_type = reason;
@@ -1362,7 +1362,6 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
        .port_num       = MFC_NUM_PORTS,
        .buf_size       = &buf_size_v5,
        .buf_align      = &mfc_buf_align_v5,
-       .mclk_name      = "sclk_mfc",
        .fw_name        = "s5p-mfc.fw",
 };
 
@@ -1389,7 +1388,6 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
        .port_num       = MFC_NUM_PORTS_V6,
        .buf_size       = &buf_size_v6,
        .buf_align      = &mfc_buf_align_v6,
-       .mclk_name      = "aclk_333",
        .fw_name        = "s5p-mfc-v6.fw",
 };
 
index 202d1d7a37a8d87a96b5a56ab0d8f3724eeb8add..ef4074cd53163faa1f9c8776ccae59d49d97d201 100644 (file)
@@ -138,6 +138,7 @@ enum s5p_mfc_inst_state {
        MFCINST_INIT = 100,
        MFCINST_GOT_INST,
        MFCINST_HEAD_PARSED,
+       MFCINST_HEAD_PRODUCED,
        MFCINST_BUFS_SET,
        MFCINST_RUNNING,
        MFCINST_FINISHING,
@@ -231,7 +232,6 @@ struct s5p_mfc_variant {
        unsigned int port_num;
        struct s5p_mfc_buf_size *buf_size;
        struct s5p_mfc_buf_align *buf_align;
-       char    *mclk_name;
        char    *fw_name;
 };
 
@@ -438,7 +438,7 @@ struct s5p_mfc_enc_params {
        u32 rc_framerate_num;
        u32 rc_framerate_denom;
 
-       union {
+       struct {
                struct s5p_mfc_h264_enc_params h264;
                struct s5p_mfc_mpeg4_enc_params mpeg4;
        } codec;
@@ -602,7 +602,7 @@ struct s5p_mfc_ctx {
        int after_packed_pb;
        int sei_fp_parse;
 
-       int dpb_count;
+       int pb_count;
        int total_dpb_count;
        int mv_count;
        /* Buffers */
index 2e5f30b40deaaf16f8b5062232976cb0ee5496a9..dc1fc94a488d06c781a5fb83ee4dcecfa74687e6 100644 (file)
@@ -38,7 +38,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
        dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size,
                                        &dev->bank1, GFP_KERNEL);
 
-       if (IS_ERR(dev->fw_virt_addr)) {
+       if (IS_ERR_OR_NULL(dev->fw_virt_addr)) {
                dev->fw_virt_addr = NULL;
                mfc_err("Allocating bitprocessor buffer failed\n");
                return -ENOMEM;
index bd5cd4ae993ce778b066395259f9dce3a04cce31..8e608f5aa0d7c866009a06f7b203c51d136040f1 100644 (file)
@@ -30,8 +30,8 @@ extern int debug;
 #define mfc_debug(level, fmt, args...)
 #endif
 
-#define mfc_debug_enter() mfc_debug(5, "enter")
-#define mfc_debug_leave() mfc_debug(5, "leave")
+#define mfc_debug_enter() mfc_debug(5, "enter\n")
+#define mfc_debug_leave() mfc_debug(5, "leave\n")
 
 #define mfc_err(fmt, args...)                          \
        do {                                            \
index 4af53bd2f182344b232b5b00aece57b35b03e9e5..00b07032f4f0c7878f1ed94d5b45cbb9512f2eda 100644 (file)
@@ -210,11 +210,11 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
        /* Context is to decode a frame */
        if (ctx->src_queue_cnt >= 1 &&
            ctx->state == MFCINST_RUNNING &&
-           ctx->dst_queue_cnt >= ctx->dpb_count)
+           ctx->dst_queue_cnt >= ctx->pb_count)
                return 1;
        /* Context is to return last frame */
        if (ctx->state == MFCINST_FINISHING &&
-           ctx->dst_queue_cnt >= ctx->dpb_count)
+           ctx->dst_queue_cnt >= ctx->pb_count)
                return 1;
        /* Context is to set buffers */
        if (ctx->src_queue_cnt >= 1 &&
@@ -224,7 +224,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
        /* Resolution change */
        if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
                ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
-               ctx->dst_queue_cnt >= ctx->dpb_count)
+               ctx->dst_queue_cnt >= ctx->pb_count)
                return 1;
        if (ctx->state == MFCINST_RES_CHANGE_END &&
                ctx->src_queue_cnt >= 1)
@@ -537,7 +537,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
                        mfc_err("vb2_reqbufs on capture failed\n");
                        return ret;
                }
-               if (reqbufs->count < ctx->dpb_count) {
+               if (reqbufs->count < ctx->pb_count) {
                        mfc_err("Not enough buffers allocated\n");
                        reqbufs->count = 0;
                        s5p_mfc_clock_on();
@@ -751,7 +751,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
        case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
                if (ctx->state >= MFCINST_HEAD_PARSED &&
                    ctx->state < MFCINST_ABORT) {
-                       ctrl->val = ctx->dpb_count;
+                       ctrl->val = ctx->pb_count;
                        break;
                } else if (ctx->state != MFCINST_INIT) {
                        v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
@@ -763,7 +763,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
                                S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
                if (ctx->state >= MFCINST_HEAD_PARSED &&
                    ctx->state < MFCINST_ABORT) {
-                       ctrl->val = ctx->dpb_count;
+                       ctrl->val = ctx->pb_count;
                } else {
                        v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
                        return -EINVAL;
@@ -924,10 +924,10 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
                /* Output plane count is 2 - one for Y and one for CbCr */
                *plane_count = 2;
                /* Setup buffer count */
-               if (*buf_count < ctx->dpb_count)
-                       *buf_count = ctx->dpb_count;
-               if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB)
-                       *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB;
+               if (*buf_count < ctx->pb_count)
+                       *buf_count = ctx->pb_count;
+               if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB)
+                       *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB;
                if (*buf_count > MFC_MAX_BUFFERS)
                        *buf_count = MFC_MAX_BUFFERS;
        } else {
index 4f6b553c4b2de90f6686f2da38cf5221f68c352d..2549967b2f8589bb46007b8282520250392287b7 100644 (file)
@@ -592,7 +592,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
                return 1;
        /* context is ready to encode a frame */
        if ((ctx->state == MFCINST_RUNNING ||
-               ctx->state == MFCINST_HEAD_PARSED) &&
+               ctx->state == MFCINST_HEAD_PRODUCED) &&
                ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
                return 1;
        /* context is ready to encode remaining frames */
@@ -649,6 +649,7 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
        struct s5p_mfc_enc_params *p = &ctx->enc_params;
        struct s5p_mfc_buf *dst_mb;
        unsigned long flags;
+       unsigned int enc_pb_count;
 
        if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
                spin_lock_irqsave(&dev->irqlock, flags);
@@ -661,18 +662,19 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
                vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
                spin_unlock_irqrestore(&dev->irqlock, flags);
        }
-       if (IS_MFCV6(dev)) {
-               ctx->state = MFCINST_HEAD_PARSED; /* for INIT_BUFFER cmd */
-       } else {
+
+       if (!IS_MFCV6(dev)) {
                ctx->state = MFCINST_RUNNING;
                if (s5p_mfc_ctx_ready(ctx))
                        set_work_bit_irqsave(ctx);
                s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
-       }
-
-       if (IS_MFCV6(dev))
-               ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops,
+       } else {
+               enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops,
                                get_enc_dpb_count, dev);
+               if (ctx->pb_count < enc_pb_count)
+                       ctx->pb_count = enc_pb_count;
+               ctx->state = MFCINST_HEAD_PRODUCED;
+       }
 
        return 0;
 }
@@ -717,9 +719,9 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
 
        slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
        strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
-       mfc_debug(2, "Encoded slice type: %d", slice_type);
-       mfc_debug(2, "Encoded stream size: %d", strm_size);
-       mfc_debug(2, "Display order: %d",
+       mfc_debug(2, "Encoded slice type: %d\n", slice_type);
+       mfc_debug(2, "Encoded stream size: %d\n", strm_size);
+       mfc_debug(2, "Display order: %d\n",
                  mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
        spin_lock_irqsave(&dev->irqlock, flags);
        if (slice_type >= 0) {
@@ -1055,15 +1057,13 @@ static int vidioc_reqbufs(struct file *file, void *priv,
                }
                ctx->capture_state = QUEUE_BUFS_REQUESTED;
 
-               if (!IS_MFCV6(dev)) {
-                       ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
-                                       alloc_codec_buffers, ctx);
-                       if (ret) {
-                               mfc_err("Failed to allocate encoding buffers\n");
-                               reqbufs->count = 0;
-                               ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
-                               return -ENOMEM;
-                       }
+               ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
+                               alloc_codec_buffers, ctx);
+               if (ret) {
+                       mfc_err("Failed to allocate encoding buffers\n");
+                       reqbufs->count = 0;
+                       ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+                       return -ENOMEM;
                }
        } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
                if (ctx->output_state != QUEUE_FREE) {
@@ -1071,6 +1071,19 @@ static int vidioc_reqbufs(struct file *file, void *priv,
                                                        ctx->output_state);
                        return -EINVAL;
                }
+
+               if (IS_MFCV6(dev)) {
+                       /* Check for min encoder buffers */
+                       if (ctx->pb_count &&
+                               (reqbufs->count < ctx->pb_count)) {
+                               reqbufs->count = ctx->pb_count;
+                               mfc_debug(2, "Minimum %d output buffers needed\n",
+                                               ctx->pb_count);
+                       } else {
+                               ctx->pb_count = reqbufs->count;
+                       }
+               }
+
                ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
                if (ret != 0) {
                        mfc_err("error in vb2_reqbufs() for E(S)\n");
@@ -1533,14 +1546,14 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
 
                spin_lock_irqsave(&dev->irqlock, flags);
                if (list_empty(&ctx->src_queue)) {
-                       mfc_debug(2, "EOS: empty src queue, entering finishing state");
+                       mfc_debug(2, "EOS: empty src queue, entering finishing state\n");
                        ctx->state = MFCINST_FINISHING;
                        if (s5p_mfc_ctx_ready(ctx))
                                set_work_bit_irqsave(ctx);
                        spin_unlock_irqrestore(&dev->irqlock, flags);
                        s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
                } else {
-                       mfc_debug(2, "EOS: marking last buffer of stream");
+                       mfc_debug(2, "EOS: marking last buffer of stream\n");
                        buf = list_entry(ctx->src_queue.prev,
                                                struct s5p_mfc_buf, list);
                        if (buf->flags & MFC_BUF_FLAG_USED)
@@ -1609,9 +1622,9 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
                        mfc_err("failed to get plane cookie\n");
                        return -EINVAL;
                }
-               mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx",
-                               vb->v4l2_buf.index, i,
-                               vb2_dma_contig_plane_dma_addr(vb, i));
+               mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n",
+                         vb->v4l2_buf.index, i,
+                         vb2_dma_contig_plane_dma_addr(vb, i));
        }
        return 0;
 }
@@ -1760,11 +1773,27 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
        struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
        struct s5p_mfc_dev *dev = ctx->dev;
 
-       v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+       if (IS_MFCV6(dev) && (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
+
+               if ((ctx->state == MFCINST_GOT_INST) &&
+                       (dev->curr_ctx == ctx->num) && dev->hw_lock) {
+                       s5p_mfc_wait_for_done_ctx(ctx,
+                                               S5P_MFC_R2H_CMD_SEQ_DONE_RET,
+                                               0);
+               }
+
+               if (ctx->src_bufs_cnt < ctx->pb_count) {
+                       mfc_err("Need minimum %d OUTPUT buffers\n",
+                                       ctx->pb_count);
+                       return -EINVAL;
+               }
+       }
+
        /* If context is ready then dev = work->data;schedule it to run */
        if (s5p_mfc_ctx_ready(ctx))
                set_work_bit_irqsave(ctx);
        s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+
        return 0;
 }
 
@@ -1920,6 +1949,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
                if (controls[i].is_volatile && ctx->ctrls[i])
                        ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
        }
+       v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
        return 0;
 }
 
index 0af05a2d1cd411f5c3b3bb840ff37cbee9cd0b24..368582b091bfb35ac65ef7bf81705156fe36ae82 100644 (file)
@@ -1275,8 +1275,8 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
        spin_unlock_irqrestore(&dev->irqlock, flags);
        dev->curr_ctx = ctx->num;
        s5p_mfc_clean_ctx_int_flags(ctx);
-       mfc_debug(2, "encoding buffer with index=%d state=%d",
-                       src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
+       mfc_debug(2, "encoding buffer with index=%d state=%d\n",
+                 src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
        s5p_mfc_encode_one_frame_v5(ctx);
        return 0;
 }
index 7e76fce2e524435f301955c286d5932cbf76adb2..66f0d042357fd32465fddd19a34a4ac201633163 100644 (file)
@@ -62,12 +62,6 @@ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx)
        /* NOP */
 }
 
-static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
-{
-       /* NOP */
-       return -1;
-}
-
 /* Allocate codec buffers */
 static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
 {
@@ -167,7 +161,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
                                S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
                ctx->bank1.size =
                        ctx->scratch_buf_size + ctx->tmv_buffer_size +
-                       (ctx->dpb_count * (ctx->luma_dpb_size +
+                       (ctx->pb_count * (ctx->luma_dpb_size +
                        ctx->chroma_dpb_size + ctx->me_buffer_size));
                ctx->bank2.size = 0;
                break;
@@ -181,7 +175,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
                                S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
                ctx->bank1.size =
                        ctx->scratch_buf_size + ctx->tmv_buffer_size +
-                       (ctx->dpb_count * (ctx->luma_dpb_size +
+                       (ctx->pb_count * (ctx->luma_dpb_size +
                        ctx->chroma_dpb_size + ctx->me_buffer_size));
                ctx->bank2.size = 0;
                break;
@@ -198,7 +192,6 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
                }
                BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
        }
-
        return 0;
 }
 
@@ -449,8 +442,8 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
        WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */
        WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6);
 
-       mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d",
-               addr, size);
+       mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n",
+                 addr, size);
 
        return 0;
 }
@@ -463,8 +456,8 @@ static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
        WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */
        WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6);
 
-       mfc_debug(2, "enc src y buf addr: 0x%08lx", y_addr);
-       mfc_debug(2, "enc src c buf addr: 0x%08lx", c_addr);
+       mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr);
+       mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr);
 }
 
 static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
@@ -479,8 +472,8 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
        enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6);
        enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6);
 
-       mfc_debug(2, "recon y addr: 0x%08lx", enc_recon_y_addr);
-       mfc_debug(2, "recon c addr: 0x%08lx", enc_recon_c_addr);
+       mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr);
+       mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr);
 }
 
 /* Set encoding ref & codec buffer */
@@ -497,7 +490,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
 
        mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
 
-       for (i = 0; i < ctx->dpb_count; i++) {
+       for (i = 0; i < ctx->pb_count; i++) {
                WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i));
                buf_addr1 += ctx->luma_dpb_size;
                WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i));
@@ -520,7 +513,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
        buf_size1 -= ctx->tmv_buffer_size;
 
        mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n",
-                       buf_addr1, buf_size1, ctx->dpb_count);
+                       buf_addr1, buf_size1, ctx->pb_count);
        if (buf_size1 < 0) {
                mfc_debug(2, "Not enough memory has been allocated.\n");
                return -ENOMEM;
@@ -1431,8 +1424,8 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
        src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
        src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
 
-       mfc_debug(2, "enc src y addr: 0x%08lx", src_y_addr);
-       mfc_debug(2, "enc src c addr: 0x%08lx", src_c_addr);
+       mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
+       mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
 
        s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
 
@@ -1522,22 +1515,6 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx)
        struct s5p_mfc_dev *dev = ctx->dev;
        int ret;
 
-       ret = s5p_mfc_alloc_codec_buffers_v6(ctx);
-       if (ret) {
-               mfc_err("Failed to allocate encoding buffers.\n");
-               return -ENOMEM;
-       }
-
-       /* Header was generated now starting processing
-        * First set the reference frame buffers
-        */
-       if (ctx->capture_state != QUEUE_BUFS_REQUESTED) {
-               mfc_err("It seems that destionation buffers were not\n"
-                       "requested.MFC requires that header should be generated\n"
-                       "before allocating codec buffer.\n");
-               return -EAGAIN;
-       }
-
        dev->curr_ctx = ctx->num;
        s5p_mfc_clean_ctx_int_flags(ctx);
        ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
@@ -1582,7 +1559,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
        mfc_debug(1, "Seting new context to %p\n", ctx);
        /* Got context to run in ctx */
        mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n",
-               ctx->dst_queue_cnt, ctx->dpb_count, ctx->src_queue_cnt);
+               ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt);
        mfc_debug(1, "ctx->state=%d\n", ctx->state);
        /* Last frame has already been sent to MFC
         * Now obtaining frames from MFC buffer */
@@ -1647,7 +1624,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
                case MFCINST_GOT_INST:
                        s5p_mfc_run_init_enc(ctx);
                        break;
-               case MFCINST_HEAD_PARSED: /* Only for MFC6.x */
+               case MFCINST_HEAD_PRODUCED:
                        ret = s5p_mfc_run_init_enc_buffers(ctx);
                        break;
                default:
@@ -1730,7 +1707,7 @@ static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
        return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6);
 }
 
-static int s5p_mfc_get_decoded_status_v6(struct s5p_mfc_dev *dev)
+static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
 {
        return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6);
 }
index 6aa38a56aaf26fda9adbe142aac5f4efbc20ce4e..11d5f1dada32b1d593b53b0555f4ebcdb371b9d2 100644 (file)
@@ -50,19 +50,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
                goto err_p_ip_clk;
        }
 
-       pm->clock = clk_get(&dev->plat_dev->dev, dev->variant->mclk_name);
-       if (IS_ERR(pm->clock)) {
-               mfc_err("Failed to get MFC clock\n");
-               ret = PTR_ERR(pm->clock);
-               goto err_g_ip_clk_2;
-       }
-
-       ret = clk_prepare(pm->clock);
-       if (ret) {
-               mfc_err("Failed to prepare MFC clock\n");
-               goto err_p_ip_clk_2;
-       }
-
        atomic_set(&pm->power, 0);
 #ifdef CONFIG_PM_RUNTIME
        pm->device = &dev->plat_dev->dev;
@@ -72,10 +59,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
        atomic_set(&clk_ref, 0);
 #endif
        return 0;
-err_p_ip_clk_2:
-       clk_put(pm->clock);
-err_g_ip_clk_2:
-       clk_unprepare(pm->clock_gate);
 err_p_ip_clk:
        clk_put(pm->clock_gate);
 err_g_ip_clk:
@@ -86,8 +69,6 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
 {
        clk_unprepare(pm->clock_gate);
        clk_put(pm->clock_gate);
-       clk_unprepare(pm->clock);
-       clk_put(pm->clock);
 #ifdef CONFIG_PM_RUNTIME
        pm_runtime_disable(pm->device);
 #endif
@@ -98,7 +79,7 @@ int s5p_mfc_clock_on(void)
        int ret;
 #ifdef CLK_DEBUG
        atomic_inc(&clk_ref);
-       mfc_debug(3, "+ %d", atomic_read(&clk_ref));
+       mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
 #endif
        ret = clk_enable(pm->clock_gate);
        return ret;
@@ -108,7 +89,7 @@ void s5p_mfc_clock_off(void)
 {
 #ifdef CLK_DEBUG
        atomic_dec(&clk_ref);
-       mfc_debug(3, "- %d", atomic_read(&clk_ref));
+       mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
 #endif
        clk_disable(pm->clock_gate);
 }
index 0b32cc3f6a4749bb93599e8094f785ef237bd764..59a9deefb24253455d531c71888c08afd49c8dc8 100644 (file)
@@ -905,11 +905,11 @@ static int sh_veu_queue_setup(struct vb2_queue *vq,
                if (ftmp.fmt.pix.width != pix->width ||
                    ftmp.fmt.pix.height != pix->height)
                        return -EINVAL;
-               size = pix->bytesperline ? pix->bytesperline * pix->height :
-                       pix->width * pix->height * fmt->depth >> 3;
+               size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth :
+                       pix->width * pix->height * fmt->depth / fmt->ydepth;
        } else {
                vfmt = sh_veu_get_vfmt(veu, vq->type);
-               size = vfmt->bytesperline * vfmt->frame.height;
+               size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth;
        }
 
        if (count < 2)
@@ -1033,8 +1033,6 @@ static int sh_veu_release(struct file *file)
 
        dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
 
-       pm_runtime_put(veu->dev);
-
        if (veu_file == veu->capture) {
                veu->capture = NULL;
                vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
@@ -1050,6 +1048,8 @@ static int sh_veu_release(struct file *file)
                veu->m2m_ctx = NULL;
        }
 
+       pm_runtime_put(veu->dev);
+
        kfree(veu_file);
 
        return 0;
@@ -1138,10 +1138,7 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id)
 
        veu->xaction++;
 
-       if (!veu->aborting)
-               return IRQ_WAKE_THREAD;
-
-       return IRQ_HANDLED;
+       return IRQ_WAKE_THREAD;
 }
 
 static int sh_veu_probe(struct platform_device *pdev)
index eea832c5fd010e18ca4daffb3c434a5907bcc52c..3a4efbdc7668e30edfe74664809b744b56a1115d 100644 (file)
@@ -643,9 +643,9 @@ static int soc_camera_close(struct file *file)
 
                if (ici->ops->init_videobuf2)
                        vb2_queue_release(&icd->vb2_vidq);
-               ici->ops->remove(icd);
-
                __soc_camera_power_off(icd);
+
+               ici->ops->remove(icd);
        }
 
        if (icd->streamer == file)
index c0beee2fa37c1f2a31453c5cce0ff647ff9726cf..d529ba788f41753f17a97f1c7b920feb3a946d1f 100644 (file)
@@ -22,6 +22,7 @@ config RADIO_SI476X
        tristate "Silicon Laboratories Si476x I2C FM Radio"
        depends on I2C && VIDEO_V4L2
        depends on MFD_SI476X_CORE
+       depends on SND_SOC
        select SND_SOC_SI476X
        ---help---
          Choose Y here if you have this FM radio chip.
index 9430c6a2993734329c09d688fbd8806a306eac6a..9dc8bafe6486a07243c546b4fc45f00ebcceb84a 100644 (file)
@@ -44,7 +44,7 @@
 
 #define FREQ_MUL (10000000 / 625)
 
-#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0b10000000 & (status))
+#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0x80 & (status))
 
 #define DRIVER_NAME "si476x-radio"
 #define DRIVER_CARD "SI476x AM/FM Receiver"
index f6768cad001a58c8d859a95a224fc8ad3b78305c..15665debc572437a7e0544dacca3efe16d06ce0a 100644 (file)
@@ -1,23 +1,3 @@
-config MEDIA_ATTACH
-       bool "Load and attach frontend and tuner driver modules as needed"
-       depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
-       depends on MODULES
-       default y if !EXPERT
-       help
-         Remove the static dependency of DVB card drivers on all
-         frontend modules for all possible card variants. Instead,
-         allow the card drivers to only load the frontend modules
-         they require.
-
-         Also, tuner module will automatically load a tuner driver
-         when needed, for analog mode.
-
-         This saves several KBytes of memory.
-
-         Note: You will need module-init-tools v3.2 or later for this feature.
-
-         If unsure say Y.
-
 # Analog TV tuners, auto-loaded via tuner.ko
 config MEDIA_TUNER
        tristate
index 22015fe1a0f322580ff9be7379f12ccc7ac9124e..2cc8ec70e3b68cee498a850525973c771354eef3 100644 (file)
@@ -376,7 +376,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
        struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf};
        struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf};
        struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
-       struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf};
+       struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf};
 
        dev_dbg(&d->udev->dev, "%s:\n", __func__);
 
@@ -481,9 +481,9 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
                goto found;
        }
 
-       /* check R820T by reading tuner stats at I2C addr 0x1a */
+       /* check R820T ID register; reg=00 val=69 */
        ret = rtl28xxu_ctrl_msg(d, &req_r820t);
-       if (ret == 0) {
+       if (ret == 0 && buf[0] == 0x69) {
                priv->tuner = TUNER_RTL2832_R820T;
                priv->tuner_name = "R820T";
                goto found;
index 3fe207e038c75b5ea260745149d271463f89d1e9..d7ff3b9687c57cb22c504e7a3a9493e41fd924eb 100644 (file)
@@ -1159,6 +1159,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
                        regs[0x01] = 0x44; /* Select 24 Mhz clock */
                        regs[0x12] = 0x02; /* Set hstart to 2 */
                }
+               break;
+       case SENSOR_PAS202:
+               /* For some unknown reason we need to increase hstart by 1 on
+                  the sn9c103, otherwise we get wrong colors (bayer shift). */
+               if (sd->bridge == BRIDGE_103)
+                       regs[0x12] += 1;
+               break;
        }
        /* Disable compression when the raw bayer format has been selected */
        if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
index 7a6a0d39c2c669777d8aa7cbd61ff57de4ff865c..81b017a554bcaf939c1c226c65c9c225f83aa758 100644 (file)
@@ -226,7 +226,7 @@ struct pwc_device
        struct list_head queued_bufs;
        spinlock_t queued_bufs_lock; /* Protects queued_bufs */
 
-       /* Note if taking both locks v4l2_lock must always be locked first! */
+       /* If taking both locks vb_queue_lock must always be locked first! */
        struct mutex v4l2_lock;      /* Protects everything else */
        struct mutex vb_queue_lock;  /* Protects vb_queue and capt_file */
 
index ebb8e48619a29583f15bd298b92f6634d05e0a42..fccd08b66d1a04e16cfc127bb672e2c0fbe18e4d 100644 (file)
@@ -1835,6 +1835,8 @@ bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
 {
        if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
                return true;
+       if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
+               return true;
        switch (ctrl->id) {
        case V4L2_CID_AUDIO_MUTE:
        case V4L2_CID_AUDIO_VOLUME:
index f81bda1a48ec33ce603a1f741466bac726298e21..7658586fe5f4607fa7487c10a312913103b19d53 100644 (file)
@@ -243,7 +243,6 @@ static void v4l_print_format(const void *arg, bool write_only)
        const struct v4l2_vbi_format *vbi;
        const struct v4l2_sliced_vbi_format *sliced;
        const struct v4l2_window *win;
-       const struct v4l2_clip *clip;
        unsigned i;
 
        pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -253,7 +252,7 @@ static void v4l_print_format(const void *arg, bool write_only)
                pix = &p->fmt.pix;
                pr_cont(", width=%u, height=%u, "
                        "pixelformat=%c%c%c%c, field=%s, "
-                       "bytesperline=%u sizeimage=%u, colorspace=%d\n",
+                       "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
                        pix->width, pix->height,
                        (pix->pixelformat & 0xff),
                        (pix->pixelformat >>  8) & 0xff,
@@ -284,20 +283,14 @@ static void v4l_print_format(const void *arg, bool write_only)
        case V4L2_BUF_TYPE_VIDEO_OVERLAY:
        case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
                win = &p->fmt.win;
-               pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, "
-                       "chromakey=0x%08x, bitmap=%p, "
-                       "global_alpha=0x%02x\n",
-                       win->w.width, win->w.height,
-                       win->w.left, win->w.top,
+               /* Note: we can't print the clip list here since the clips
+                * pointer is a userspace pointer, not a kernelspace
+                * pointer. */
+               pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n",
+                       win->w.width, win->w.height, win->w.left, win->w.top,
                        prt_names(win->field, v4l2_field_names),
-                       win->chromakey, win->bitmap, win->global_alpha);
-               clip = win->clips;
-               for (i = 0; i < win->clipcount; i++) {
-                       printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n",
-                                       i, clip->c.width, clip->c.height,
-                                       clip->c.left, clip->c.top);
-                       clip = clip->next;
-               }
+                       win->chromakey, win->clipcount, win->clips,
+                       win->bitmap, win->global_alpha);
                break;
        case V4L2_BUF_TYPE_VBI_CAPTURE:
        case V4L2_BUF_TYPE_VBI_OUTPUT:
@@ -332,7 +325,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
 
        pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
                "height=%u, pixelformat=%c%c%c%c, "
-               "bytesperline=%u sizeimage=%u, colorspace=%d\n",
+               "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
                        p->capability, p->flags, p->base,
                        p->fmt.width, p->fmt.height,
                        (p->fmt.pixelformat & 0xff),
@@ -353,7 +346,7 @@ static void v4l_print_modulator(const void *arg, bool write_only)
        const struct v4l2_modulator *p = arg;
 
        if (write_only)
-               pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans);
+               pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
        else
                pr_cont("index=%u, name=%.*s, capability=0x%x, "
                        "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
@@ -445,13 +438,13 @@ static void v4l_print_buffer(const void *arg, bool write_only)
                for (i = 0; i < p->length; ++i) {
                        plane = &p->m.planes[i];
                        printk(KERN_DEBUG
-                               "plane %d: bytesused=%d, data_offset=0x%08x "
+                               "plane %d: bytesused=%d, data_offset=0x%08x, "
                                "offset/userptr=0x%lx, length=%d\n",
                                i, plane->bytesused, plane->data_offset,
                                plane->m.userptr, plane->length);
                }
        } else {
-               pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n",
+               pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n",
                        p->bytesused, p->m.userptr, p->length);
        }
 
@@ -504,6 +497,8 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
                        c->capability, c->outputmode,
                        c->timeperframe.numerator, c->timeperframe.denominator,
                        c->extendedmode, c->writebuffers);
+       } else {
+               pr_cont("\n");
        }
 }
 
@@ -734,11 +729,11 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
                        p->type);
        switch (p->type) {
        case V4L2_FRMSIZE_TYPE_DISCRETE:
-               pr_cont(" wxh=%ux%u\n",
+               pr_cont(", wxh=%ux%u\n",
                        p->discrete.width, p->discrete.height);
                break;
        case V4L2_FRMSIZE_TYPE_STEPWISE:
-               pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n",
+               pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
                                p->stepwise.min_width,  p->stepwise.min_height,
                                p->stepwise.step_width, p->stepwise.step_height,
                                p->stepwise.max_width,  p->stepwise.max_height);
@@ -764,12 +759,12 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only)
                        p->width, p->height, p->type);
        switch (p->type) {
        case V4L2_FRMIVAL_TYPE_DISCRETE:
-               pr_cont(" fps=%d/%d\n",
+               pr_cont(", fps=%d/%d\n",
                                p->discrete.numerator,
                                p->discrete.denominator);
                break;
        case V4L2_FRMIVAL_TYPE_STEPWISE:
-               pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n",
+               pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
                                p->stepwise.min.numerator,
                                p->stepwise.min.denominator,
                                p->stepwise.max.numerator,
@@ -807,8 +802,8 @@ static void v4l_print_event(const void *arg, bool write_only)
                        pr_cont("value64=%lld, ", c->value64);
                else
                        pr_cont("value=%d, ", c->value);
-               pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d,"
-                               " default_value=%d\n",
+               pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
+                       "default_value=%d\n",
                        c->flags, c->minimum, c->maximum,
                        c->step, c->default_value);
                break;
@@ -845,7 +840,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
        const struct v4l2_frequency_band *p = arg;
 
        pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
-                       "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+               "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
                        p->tuner, p->type, p->index,
                        p->capability, p->rangelow,
                        p->rangehigh, p->modulation);
index 66f599fcb8298f70cb18545460b8c8b0df807119..e96497f7c3ed1b598106da2b7aa51f9a46284c68 100644 (file)
@@ -205,7 +205,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
 static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 {
        struct v4l2_m2m_dev *m2m_dev;
-       unsigned long flags_job, flags;
+       unsigned long flags_job, flags_out, flags_cap;
 
        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -223,23 +223,26 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
                return;
        }
 
-       spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+       spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
-               spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+               spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+                                       flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
-       spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+       spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
-               spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
-               spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+               spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
+                                       flags_cap);
+               spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+                                       flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
-       spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
-       spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+       spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+       spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 
        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -371,6 +374,20 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 
+/**
+ * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
+ * on the type
+ */
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+                        struct v4l2_create_buffers *create)
+{
+       struct vb2_queue *vq;
+
+       vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
+       return vb2_create_bufs(vq, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
+
 /**
  * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
  * the type
@@ -486,8 +503,10 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
        if (m2m_ctx->m2m_dev->m2m_ops->unlock)
                m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
 
-       poll_wait(file, &src_q->done_wq, wait);
-       poll_wait(file, &dst_q->done_wq, wait);
+       if (list_empty(&src_q->done_list))
+               poll_wait(file, &src_q->done_wq, wait);
+       if (list_empty(&dst_q->done_list))
+               poll_wait(file, &dst_q->done_wq, wait);
 
        if (m2m_ctx->m2m_dev->m2m_ops->lock)
                m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
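
Editor's note: the hunk above exports a new v4l2_m2m_create_bufs() helper that resolves the source or destination vb2 queue from create->format.type and forwards to vb2_create_bufs(). As a rough illustration of how a mem2mem driver would hook it up -- the context structure and field names below are assumptions, not taken from this merge -- the VIDIOC_CREATE_BUFS handler reduces to a one-line wrapper:

    #include <media/v4l2-mem2mem.h>

    /* Illustrative only: "my_ctx" and its m2m_ctx member are assumed driver state. */
    static int my_vidioc_create_bufs(struct file *file, void *priv,
                                     struct v4l2_create_buffers *create)
    {
            struct my_ctx *ctx = priv;

            /* Picks the OUTPUT or CAPTURE queue of ctx->m2m_ctx based on
             * create->format.type and calls vb2_create_bufs() on it. */
            return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
    }
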
index 7d833eefaf4e9bf2124f035db614179127268e4a..e3bdc3be91e12822bcf92c4538433522a2aaa886 100644 (file)
@@ -2014,7 +2014,8 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
        if (list_empty(&q->queued_list))
                return res | POLLERR;
 
-       poll_wait(file, &q->done_wq, wait);
+       if (list_empty(&q->done_list))
+               poll_wait(file, &q->done_wq, wait);
 
        /*
         * Take first buffer available for dequeuing.
index 721b9186a5d1cdeb3679bbd34ba786d5d9e996e8..4b93ed4d5cd6a3649efdaa2c0eedc191792ab7c0 100644 (file)
@@ -107,7 +107,7 @@ static struct mfd_cell tps6586x_cell[] = {
                .name = "tps6586x-gpio",
        },
        {
-               .name = "tps6586x-pmic",
+               .name = "tps6586x-regulator",
        },
        {
                .name = "tps6586x-rtc",
index 02d9ae7d527e14894d71e67e93372773ad9e5ea0..f975696135265a68a5946d7f784d4688f4ad0d30 100644 (file)
@@ -2413,7 +2413,8 @@ static void bond_miimon_commit(struct bonding *bond)
 
                        pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
                                bond->dev->name, slave->dev->name,
-                               slave->speed, slave->duplex ? "full" : "half");
+                               slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
+                               slave->duplex ? "full" : "half");
 
                        /* notify ad that the link status has changed */
                        if (bond->params.mode == BOND_MODE_8023AD)
index 6e15ef08f301fe385dec2727bdc6f92dded28e89..cbd388eea68271c8ab7eaa2581ffb9145a4be429 100644 (file)
@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
        err = usb_8dev_cmd_version(priv, &version);
        if (err) {
                netdev_err(netdev, "can't get firmware version\n");
-               goto cleanup_cmd_msg_buffer;
+               goto cleanup_unregister_candev;
        } else {
                netdev_info(netdev,
                         "firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf,
 
        return 0;
 
+cleanup_unregister_candev:
+       unregister_netdev(priv->netdev);
+
 cleanup_cmd_msg_buffer:
        kfree(priv->cmd_msg_buffer);
 
index 36d6abd1cfff5a147c6ce79f18f2dccd9cac16cd..ad6aa1e98348cef47bfd229bc58375f0ef9396c6 100644 (file)
@@ -67,4 +67,22 @@ config ATL1C
          To compile this driver as a module, choose M here.  The module
          will be called atl1c.
 
+config ALX
+       tristate "Qualcomm Atheros AR816x/AR817x support"
+       depends on PCI
+       select CRC32
+       select NET_CORE
+       select MDIO
+       help
+         This driver supports the Qualcomm Atheros L1F ethernet adapter,
+         i.e. the following chipsets:
+
+         1969:1091 - AR8161 Gigabit Ethernet
+         1969:1090 - AR8162 Fast Ethernet
+         1969:10A1 - AR8171 Gigabit Ethernet
+         1969:10A0 - AR8172 Fast Ethernet
+
+         To compile this driver as a module, choose M here.  The module
+         will be called alx.
+
 endif # NET_VENDOR_ATHEROS
index e7e76fb576ff5c9db197936935ba5c20a0c9aa2a..5cf1c65bbce9877ca243a115aff1c2ee57257b4c 100644 (file)
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
 obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_ATL1C) += atl1c/
+obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644 (file)
index 0000000..5901fa4
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ALX) += alx.o
+alx-objs := main.o ethtool.o hw.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644 (file)
index 0000000..50b3ae2
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ALX_H_
+#define _ALX_H_
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include "hw.h"
+
+#define ALX_WATCHDOG_TIME   (5 * HZ)
+
+struct alx_buffer {
+       struct sk_buff *skb;
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(size);
+};
+
+struct alx_rx_queue {
+       struct alx_rrd *rrd;
+       dma_addr_t rrd_dma;
+
+       struct alx_rfd *rfd;
+       dma_addr_t rfd_dma;
+
+       struct alx_buffer *bufs;
+
+       u16 write_idx, read_idx;
+       u16 rrd_read_idx;
+};
+#define ALX_RX_ALLOC_THRESH    32
+
+struct alx_tx_queue {
+       struct alx_txd *tpd;
+       dma_addr_t tpd_dma;
+       struct alx_buffer *bufs;
+       u16 write_idx, read_idx;
+};
+
+#define ALX_DEFAULT_TX_WORK 128
+
+enum alx_device_quirks {
+       ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
+};
+
+struct alx_priv {
+       struct net_device *dev;
+
+       struct alx_hw hw;
+
+       /* all descriptor memory */
+       struct {
+               dma_addr_t dma;
+               void *virt;
+               int size;
+       } descmem;
+
+       /* protect int_mask updates */
+       spinlock_t irq_lock;
+       u32 int_mask;
+
+       int tx_ringsz;
+       int rx_ringsz;
+       int rxbuf_size;
+
+       struct napi_struct napi;
+       struct alx_tx_queue txq;
+       struct alx_rx_queue rxq;
+
+       struct work_struct link_check_wk;
+       struct work_struct reset_wk;
+
+       u16 msg_enable;
+
+       bool msi;
+};
+
+extern const struct ethtool_ops alx_ethtool_ops;
+extern const char alx_drv_name[];
+
+#endif
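
Editor's note: struct alx_buffer above stores its DMA unmap state through DEFINE_DMA_UNMAP_ADDR(dma) and DEFINE_DMA_UNMAP_LEN(size), so the cookie and length only occupy space on architectures that need them for unmapping. A minimal sketch of the usual fill/tear-down pattern, assuming a TX mapping; the device pointer, function names and direction are illustrative (the real TX/RX paths live in main.c, which is part of this merge but not shown here):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Sketch only: "dev" and the DMA_TO_DEVICE direction are assumptions. */
    static int example_map_tx(struct device *dev, struct alx_buffer *buf,
                              struct sk_buff *skb)
    {
            dma_addr_t dma = dma_map_single(dev, skb->data, skb->len,
                                            DMA_TO_DEVICE);

            if (dma_mapping_error(dev, dma))
                    return -ENOMEM;
            /* stash address and length so the completion path can unmap */
            dma_unmap_addr_set(buf, dma, dma);
            dma_unmap_len_set(buf, size, skb->len);
            return 0;
    }

    static void example_unmap_tx(struct device *dev, struct alx_buffer *buf)
    {
            dma_unmap_single(dev, dma_unmap_addr(buf, dma),
                             dma_unmap_len(buf, size), DMA_TO_DEVICE);
    }
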
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644 (file)
index 0000000..6fa2aec
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+#include "alx.h"
+#include "reg.h"
+#include "hw.h"
+
+
+static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       ecmd->supported = SUPPORTED_10baseT_Half |
+                         SUPPORTED_10baseT_Full |
+                         SUPPORTED_100baseT_Half |
+                         SUPPORTED_100baseT_Full |
+                         SUPPORTED_Autoneg |
+                         SUPPORTED_TP |
+                         SUPPORTED_Pause;
+       if (alx_hw_giga(hw))
+               ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+       ecmd->advertising = ADVERTISED_TP;
+       if (hw->adv_cfg & ADVERTISED_Autoneg)
+               ecmd->advertising |= hw->adv_cfg;
+
+       ecmd->port = PORT_TP;
+       ecmd->phy_address = 0;
+       if (hw->adv_cfg & ADVERTISED_Autoneg)
+               ecmd->autoneg = AUTONEG_ENABLE;
+       else
+               ecmd->autoneg = AUTONEG_DISABLE;
+       ecmd->transceiver = XCVR_INTERNAL;
+
+       if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
+               if (hw->flowctrl & ALX_FC_RX) {
+                       ecmd->advertising |= ADVERTISED_Pause;
+
+                       if (!(hw->flowctrl & ALX_FC_TX))
+                               ecmd->advertising |= ADVERTISED_Asym_Pause;
+               } else if (hw->flowctrl & ALX_FC_TX) {
+                       ecmd->advertising |= ADVERTISED_Asym_Pause;
+               }
+       }
+
+       if (hw->link_speed != SPEED_UNKNOWN) {
+               ethtool_cmd_speed_set(ecmd,
+                                     hw->link_speed - hw->link_speed % 10);
+               ecmd->duplex = hw->link_speed % 10;
+       } else {
+               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+               ecmd->duplex = DUPLEX_UNKNOWN;
+       }
+
+       return 0;
+}
+
+static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       u32 adv_cfg;
+
+       ASSERT_RTNL();
+
+       if (ecmd->autoneg == AUTONEG_ENABLE) {
+               if (ecmd->advertising & ADVERTISED_1000baseT_Half)
+                       return -EINVAL;
+               adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
+       } else {
+               int speed = ethtool_cmd_speed(ecmd);
+
+               switch (speed + ecmd->duplex) {
+               case SPEED_10 + DUPLEX_HALF:
+                       adv_cfg = ADVERTISED_10baseT_Half;
+                       break;
+               case SPEED_10 + DUPLEX_FULL:
+                       adv_cfg = ADVERTISED_10baseT_Full;
+                       break;
+               case SPEED_100 + DUPLEX_HALF:
+                       adv_cfg = ADVERTISED_100baseT_Half;
+                       break;
+               case SPEED_100 + DUPLEX_FULL:
+                       adv_cfg = ADVERTISED_100baseT_Full;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       hw->adv_cfg = adv_cfg;
+       return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
+}
+
+static void alx_get_pauseparam(struct net_device *netdev,
+                              struct ethtool_pauseparam *pause)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       if (hw->flowctrl & ALX_FC_ANEG &&
+           hw->adv_cfg & ADVERTISED_Autoneg)
+               pause->autoneg = AUTONEG_ENABLE;
+       else
+               pause->autoneg = AUTONEG_DISABLE;
+
+       if (hw->flowctrl & ALX_FC_TX)
+               pause->tx_pause = 1;
+       else
+               pause->tx_pause = 0;
+
+       if (hw->flowctrl & ALX_FC_RX)
+               pause->rx_pause = 1;
+       else
+               pause->rx_pause = 0;
+}
+
+
+static int alx_set_pauseparam(struct net_device *netdev,
+                             struct ethtool_pauseparam *pause)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       int err = 0;
+       bool reconfig_phy = false;
+       u8 fc = 0;
+
+       if (pause->tx_pause)
+               fc |= ALX_FC_TX;
+       if (pause->rx_pause)
+               fc |= ALX_FC_RX;
+       if (pause->autoneg)
+               fc |= ALX_FC_ANEG;
+
+       ASSERT_RTNL();
+
+       /* restart auto-neg for auto-mode */
+       if (hw->adv_cfg & ADVERTISED_Autoneg) {
+               if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
+                       reconfig_phy = true;
+               if (fc & hw->flowctrl & ALX_FC_ANEG &&
+                   (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+                       reconfig_phy = true;
+       }
+
+       if (reconfig_phy) {
+               err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
+               return err;
+       }
+
+       /* flow control on mac */
+       if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+               alx_cfg_mac_flowcontrol(hw, fc);
+
+       hw->flowctrl = fc;
+
+       return 0;
+}
+
+static u32 alx_get_msglevel(struct net_device *netdev)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       return alx->msg_enable;
+}
+
+static void alx_set_msglevel(struct net_device *netdev, u32 data)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       alx->msg_enable = data;
+}
+
+static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       wol->supported = WAKE_MAGIC | WAKE_PHY;
+       wol->wolopts = 0;
+
+       if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+               wol->wolopts |= WAKE_MAGIC;
+       if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
+               wol->wolopts |= WAKE_PHY;
+}
+
+static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+                           WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+               return -EOPNOTSUPP;
+
+       hw->sleep_ctrl = 0;
+
+       if (wol->wolopts & WAKE_MAGIC)
+               hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
+       if (wol->wolopts & WAKE_PHY)
+               hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
+
+       device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
+
+       return 0;
+}
+
+static void alx_get_drvinfo(struct net_device *netdev,
+                           struct ethtool_drvinfo *drvinfo)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
+               sizeof(drvinfo->bus_info));
+}
+
+const struct ethtool_ops alx_ethtool_ops = {
+       .get_settings   = alx_get_settings,
+       .set_settings   = alx_set_settings,
+       .get_pauseparam = alx_get_pauseparam,
+       .set_pauseparam = alx_set_pauseparam,
+       .get_drvinfo    = alx_get_drvinfo,
+       .get_msglevel   = alx_get_msglevel,
+       .set_msglevel   = alx_set_msglevel,
+       .get_wol        = alx_get_wol,
+       .set_wol        = alx_set_wol,
+       .get_link       = ethtool_op_get_link,
+       .get_link       = ethtool_op_get_link,
+};
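
Editor's note: ethtool.c only defines the ops table; it is presumably wired up in the driver's probe path in main.c (also added by this merge, but not shown in this excerpt), along the usual lines -- the surrounding variable names are illustrative:

    /* in the probe path, before register_netdev() */
    struct net_device *netdev = alloc_etherdev(sizeof(struct alx_priv));

    netdev->ethtool_ops = &alx_ethtool_ops;  /* get/set_settings, pause, WoL, link */
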
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644 (file)
index 0000000..220a16a
--- /dev/null
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/mdio.h>
+#include "reg.h"
+#include "hw.h"
+
+static inline bool alx_is_rev_a(u8 rev)
+{
+       return rev == ALX_REV_A0 || rev == ALX_REV_A1;
+}
+
+static int alx_wait_mdio_idle(struct alx_hw *hw)
+{
+       u32 val;
+       int i;
+
+       for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
+               val = alx_read_mem32(hw, ALX_MDIO);
+               if (!(val & ALX_MDIO_BUSY))
+                       return 0;
+               udelay(10);
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+                            u16 reg, u16 *phy_data)
+{
+       u32 val, clk_sel;
+       int err;
+
+       *phy_data = 0;
+
+       /* use slow clock when it's in hibernation status */
+       clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+                       ALX_MDIO_CLK_SEL_25MD4 :
+                       ALX_MDIO_CLK_SEL_25MD128;
+
+       if (ext) {
+               val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+                     reg << ALX_MDIO_EXTN_REG_SHIFT;
+               alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+               val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
+                     ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
+                     clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
+       } else {
+               val = ALX_MDIO_SPRES_PRMBL |
+                     clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+                     reg << ALX_MDIO_REG_SHIFT |
+                     ALX_MDIO_START | ALX_MDIO_OP_READ;
+       }
+       alx_write_mem32(hw, ALX_MDIO, val);
+
+       err = alx_wait_mdio_idle(hw);
+       if (err)
+               return err;
+       val = alx_read_mem32(hw, ALX_MDIO);
+       *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
+       return 0;
+}
+
+static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+                             u16 reg, u16 phy_data)
+{
+       u32 val, clk_sel;
+
+       /* use slow clock when it's in hibernation status */
+       clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+                       ALX_MDIO_CLK_SEL_25MD4 :
+                       ALX_MDIO_CLK_SEL_25MD128;
+
+       if (ext) {
+               val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+                     reg << ALX_MDIO_EXTN_REG_SHIFT;
+               alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+               val = ALX_MDIO_SPRES_PRMBL |
+                     clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+                     phy_data << ALX_MDIO_DATA_SHIFT |
+                     ALX_MDIO_START | ALX_MDIO_MODE_EXT;
+       } else {
+               val = ALX_MDIO_SPRES_PRMBL |
+                     clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+                     reg << ALX_MDIO_REG_SHIFT |
+                     phy_data << ALX_MDIO_DATA_SHIFT |
+                     ALX_MDIO_START;
+       }
+       alx_write_mem32(hw, ALX_MDIO, val);
+
+       return alx_wait_mdio_idle(hw);
+}
+
+static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+       return alx_read_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+       return alx_write_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+       return alx_read_phy_core(hw, true, dev, reg, pdata);
+}
+
+static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+       return alx_write_phy_core(hw, true, dev, reg, data);
+}
+
+static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+       int err;
+
+       err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+       if (err)
+               return err;
+
+       return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
+}
+
+static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+       int err;
+
+       err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+       if (err)
+               return err;
+
+       return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
+}
+
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_read_phy_reg(hw, reg, phy_data);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_write_phy_reg(hw, reg, phy_data);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_read_phy_ext(hw, dev, reg, pdata);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_write_phy_ext(hw, dev, reg, data);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_read_phy_dbg(hw, reg, pdata);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+       int err;
+
+       spin_lock(&hw->mdio_lock);
+       err = __alx_write_phy_dbg(hw, reg, data);
+       spin_unlock(&hw->mdio_lock);
+
+       return err;
+}
+
+static u16 alx_get_phy_config(struct alx_hw *hw)
+{
+       u32 val;
+       u16 phy_val;
+
+       val = alx_read_mem32(hw, ALX_PHY_CTRL);
+       /* phy in reset */
+       if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
+               return ALX_DRV_PHY_UNKNOWN;
+
+       val = alx_read_mem32(hw, ALX_DRV);
+       val = ALX_GET_FIELD(val, ALX_DRV_PHY);
+       if (ALX_DRV_PHY_UNKNOWN == val)
+               return ALX_DRV_PHY_UNKNOWN;
+
+       alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
+       if (ALX_PHY_INITED == phy_val)
+               return val;
+
+       return ALX_DRV_PHY_UNKNOWN;
+}
+
+static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
+{
+       u32 read;
+       int i;
+
+       for (i = 0; i < ALX_SLD_MAX_TO; i++) {
+               read = alx_read_mem32(hw, reg);
+               if ((read & wait) == 0) {
+                       if (val)
+                               *val = read;
+                       return true;
+               }
+               mdelay(1);
+       }
+
+       return false;
+}
+
+static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
+{
+       u32 mac0, mac1;
+
+       mac0 = alx_read_mem32(hw, ALX_STAD0);
+       mac1 = alx_read_mem32(hw, ALX_STAD1);
+
+       /* addr should be big-endian */
+       *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
+       *(__be16 *)addr = cpu_to_be16(mac1);
+
+       return is_valid_ether_addr(addr);
+}
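+
+/* Note: with the byte order above, STAD0=6AF600DC / STAD1=000B reads back
+ * as 00-0B-6A-F6-00-DC -- the mirror of the example in alx_set_macaddr()
+ * below.
+ */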
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
+{
+       u32 val;
+
+       /* try to get it from register first */
+       if (alx_read_macaddr(hw, addr))
+               return 0;
+
+       /* try to load from efuse */
+       if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
+               return -EIO;
+       alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
+       if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
+               return -EIO;
+       if (alx_read_macaddr(hw, addr))
+               return 0;
+
+       /* try to load from flash/eeprom (if present) */
+       val = alx_read_mem32(hw, ALX_EFLD);
+       if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
+               if (!alx_wait_reg(hw, ALX_EFLD,
+                                 ALX_EFLD_STAT | ALX_EFLD_START, &val))
+                       return -EIO;
+               alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
+               if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
+                       return -EIO;
+               if (alx_read_macaddr(hw, addr))
+                       return 0;
+       }
+
+       return -EIO;
+}
+
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
+{
+       u32 val;
+
+       /* for example: 00-0B-6A-F6-00-DC => STAD0=6AF600DC, STAD1=000B */
+       val = be32_to_cpu(*(__be32 *)(addr + 2));
+       alx_write_mem32(hw, ALX_STAD0, val);
+       val = be16_to_cpu(*(__be16 *)addr);
+       alx_write_mem32(hw, ALX_STAD1, val);
+}
+
+static void alx_enable_osc(struct alx_hw *hw)
+{
+       u32 val;
+
+       /* rising edge */
+       val = alx_read_mem32(hw, ALX_MISC);
+       alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
+       alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+}
+
+static void alx_reset_osc(struct alx_hw *hw, u8 rev)
+{
+       u32 val, val2;
+
+       /* clear internal OSC settings; let the hw switch the OSC itself */
+       val = alx_read_mem32(hw, ALX_MISC3);
+       alx_write_mem32(hw, ALX_MISC3,
+                       (val & ~ALX_MISC3_25M_BY_SW) |
+                       ALX_MISC3_25M_NOTO_INTNL);
+
+       /* the 25MHz clock from the chipset may be unstable for 1s after
+        * PERST is de-asserted; the driver needs to re-calibrate it before
+        * entering sleep for WoL
+        */
+       val = alx_read_mem32(hw, ALX_MISC);
+       if (rev >= ALX_REV_B0) {
+               /* restore the over-current protection default value,
+                * which may have been reset by MAC-RST
+                */
+               ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
+               /* a 0->1 transition updates the internal OSC value */
+               val &= ~ALX_MISC_INTNLOSC_OPEN;
+               alx_write_mem32(hw, ALX_MISC, val);
+               alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+               /* hw automatically disables the OSC after calibration */
+               val2 = alx_read_mem32(hw, ALX_MSIC2);
+               val2 &= ~ALX_MSIC2_CALB_START;
+               alx_write_mem32(hw, ALX_MSIC2, val2);
+               alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
+       } else {
+               val &= ~ALX_MISC_INTNLOSC_OPEN;
+               /* disable isolate for rev A devices */
+               if (alx_is_rev_a(rev))
+                       val &= ~ALX_MISC_ISO_EN;
+
+               alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+               alx_write_mem32(hw, ALX_MISC, val);
+       }
+
+       udelay(20);
+}
+
+static int alx_stop_mac(struct alx_hw *hw)
+{
+       u32 rxq, txq, val;
+       u16 i;
+
+       rxq = alx_read_mem32(hw, ALX_RXQ0);
+       alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
+       txq = alx_read_mem32(hw, ALX_TXQ0);
+       alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
+
+       udelay(40);
+
+       hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+       for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+               val = alx_read_mem32(hw, ALX_MAC_STS);
+               if (!(val & ALX_MAC_STS_IDLE))
+                       return 0;
+               udelay(10);
+       }
+
+       return -ETIMEDOUT;
+}
+
+int alx_reset_mac(struct alx_hw *hw)
+{
+       u32 val, pmctrl;
+       int i, ret;
+       u8 rev;
+       bool a_cr;
+
+       pmctrl = 0;
+       rev = alx_hw_revision(hw);
+       a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
+
+       /* disable all interrupts, RXQ/TXQ */
+       alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
+       alx_write_mem32(hw, ALX_IMR, 0);
+       alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+
+       ret = alx_stop_mac(hw);
+       if (ret)
+               return ret;
+
+       /* MAC reset workaround */
+       alx_write_mem32(hw, ALX_RFD_PIDX, 1);
+
+       /* disable L0s/L1 before MAC reset */
+       if (a_cr) {
+               pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+               if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+                       alx_write_mem32(hw, ALX_PMCTRL,
+                                       pmctrl & ~(ALX_PMCTRL_L1_EN |
+                                                  ALX_PMCTRL_L0S_EN));
+       }
+
+       /* reset whole mac safely */
+       val = alx_read_mem32(hw, ALX_MASTER);
+       alx_write_mem32(hw, ALX_MASTER,
+                       val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
+
+       /* make sure it's really idle */
+       udelay(10);
+       for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+               val = alx_read_mem32(hw, ALX_RFD_PIDX);
+               if (val == 0)
+                       break;
+               udelay(10);
+       }
+       for (; i < ALX_DMA_MAC_RST_TO; i++) {
+               val = alx_read_mem32(hw, ALX_MASTER);
+               if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
+                       break;
+               udelay(10);
+       }
+       if (i == ALX_DMA_MAC_RST_TO)
+               return -EIO;
+       udelay(10);
+
+       if (a_cr) {
+               alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
+               /* restore l0s / l1 */
+               if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+                       alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+       }
+
+       alx_reset_osc(hw, rev);
+
+       /* clear internal OSC settings, let the hw switch the OSC itself,
+        * and disable isolation for rev A devices
+        */
+       val = alx_read_mem32(hw, ALX_MISC3);
+       alx_write_mem32(hw, ALX_MISC3,
+                       (val & ~ALX_MISC3_25M_BY_SW) |
+                       ALX_MISC3_25M_NOTO_INTNL);
+       val = alx_read_mem32(hw, ALX_MISC);
+       val &= ~ALX_MISC_INTNLOSC_OPEN;
+       if (alx_is_rev_a(rev))
+               val &= ~ALX_MISC_ISO_EN;
+       alx_write_mem32(hw, ALX_MISC, val);
+       udelay(20);
+
+       /* driver controls speed/duplex and the hash algorithm */
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+       val = alx_read_mem32(hw, ALX_SERDES);
+       alx_write_mem32(hw, ALX_SERDES,
+                       val | ALX_SERDES_MACCLK_SLWDWN |
+                       ALX_SERDES_PHYCLK_SLWDWN);
+
+       return 0;
+}
+
+void alx_reset_phy(struct alx_hw *hw)
+{
+       int i;
+       u32 val;
+       u16 phy_val;
+
+       /* (DSP)reset PHY core */
+       val = alx_read_mem32(hw, ALX_PHY_CTRL);
+       val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
+                ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
+                ALX_PHY_CTRL_CLS);
+       val |= ALX_PHY_CTRL_RST_ANALOG;
+
+       val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
+       alx_write_mem32(hw, ALX_PHY_CTRL, val);
+       udelay(10);
+       alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
+
+       for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
+               udelay(10);
+
+       /* phy power saving & hib */
+       alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
+                         ALX_SYSMODCTRL_IECHOADJ_DEF);
+       alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
+                         ALX_VDRVBIAS_DEF);
+
+       /* EEE advertisement */
+       val = alx_read_mem32(hw, ALX_LPI_CTRL);
+       alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
+       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
+
+       /* phy power saving */
+       alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
+       alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+       alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+                         phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
+       /* rtl8139c, 120m issue */
+       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
+                         ALX_MIIEXT_NLP78_120M_DEF);
+       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
+                         ALX_MIIEXT_S3DIG10_DEF);
+
+       if (hw->lnk_patch) {
+               /* Turn off half amplitude */
+               alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+                                &phy_val);
+               alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+                                 phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
+               /* Turn off Green feature */
+               alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+               alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+                                 phy_val | ALX_GREENCFG2_BP_GREEN);
+               /* Turn off half Bias */
+               alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+                                &phy_val);
+               alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+                                 phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
+       }
+
+       /* set phy interrupt mask */
+       alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
+}
+
+#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
+void alx_reset_pcie(struct alx_hw *hw)
+{
+       u8 rev = alx_hw_revision(hw);
+       u32 val;
+       u16 val16;
+
+       /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+       pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
+       if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
+               val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
+               pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
+       }
+
+       /* clear WoL setting/status */
+       val = alx_read_mem32(hw, ALX_WOL0);
+       alx_write_mem32(hw, ALX_WOL0, 0);
+
+       val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+       alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
+
+       /* mask some pcie error bits */
+       val = alx_read_mem32(hw, ALX_UE_SVRT);
+       val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
+       alx_write_mem32(hw, ALX_UE_SVRT, val);
+
+       /* wol 25M & pclk */
+       val = alx_read_mem32(hw, ALX_MASTER);
+       if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
+               if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+                   (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
+                       alx_write_mem32(hw, ALX_MASTER,
+                                       val | ALX_MASTER_PCLKSEL_SRDS |
+                                       ALX_MASTER_WAKEN_25M);
+       } else {
+               if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+                   (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
+                       alx_write_mem32(hw, ALX_MASTER,
+                                       (val & ~ALX_MASTER_PCLKSEL_SRDS) |
+                                       ALX_MASTER_WAKEN_25M);
+       }
+
+       /* ASPM setting */
+       alx_enable_aspm(hw, true, true);
+
+       udelay(10);
+}
+
+void alx_start_mac(struct alx_hw *hw)
+{
+       u32 mac, txq, rxq;
+
+       rxq = alx_read_mem32(hw, ALX_RXQ0);
+       alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
+       txq = alx_read_mem32(hw, ALX_TXQ0);
+       alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
+
+       mac = hw->rx_ctrl;
+       if (hw->link_speed % 10 == DUPLEX_FULL)
+               mac |= ALX_MAC_CTRL_FULLD;
+       else
+               mac &= ~ALX_MAC_CTRL_FULLD;
+       ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+                     hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
+                                                    ALX_MAC_CTRL_SPEED_10_100);
+       mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
+       hw->rx_ctrl = mac;
+       alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+}
+
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
+{
+       if (fc & ALX_FC_RX)
+               hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
+       else
+               hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
+
+       if (fc & ALX_FC_TX)
+               hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
+       else
+               hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
+
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+       u32 pmctrl;
+       u8 rev = alx_hw_revision(hw);
+
+       pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+
+       ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
+                     ALX_PMCTRL_LCKDET_TIMER_DEF);
+       pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
+                 ALX_PMCTRL_L1_CLKSW_EN |
+                 ALX_PMCTRL_L1_SRDSRX_PWD;
+       ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
+       ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
+       pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
+                   ALX_PMCTRL_L1_SRDSPLL_EN |
+                   ALX_PMCTRL_L1_BUFSRX_EN |
+                   ALX_PMCTRL_SADLY_EN |
+                   ALX_PMCTRL_HOTRST_WTEN |
+                   ALX_PMCTRL_L0S_EN |
+                   ALX_PMCTRL_L1_EN |
+                   ALX_PMCTRL_ASPM_FCEN |
+                   ALX_PMCTRL_TXL1_AFTER_L0S |
+                   ALX_PMCTRL_RXL1_AFTER_L0S);
+       if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
+               pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
+
+       if (l0s_en)
+               pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
+       if (l1_en)
+               pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
+
+       alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+}
+
+
+static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
+{
+       u32 cfg = 0;
+
+       if (ethadv_cfg & ADVERTISED_Autoneg) {
+               cfg |= ALX_DRV_PHY_AUTO;
+               if (ethadv_cfg & ADVERTISED_10baseT_Half)
+                       cfg |= ALX_DRV_PHY_10;
+               if (ethadv_cfg & ADVERTISED_10baseT_Full)
+                       cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+               if (ethadv_cfg & ADVERTISED_100baseT_Half)
+                       cfg |= ALX_DRV_PHY_100;
+               if (ethadv_cfg & ADVERTISED_100baseT_Full)
+                       cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+               if (ethadv_cfg & ADVERTISED_1000baseT_Half)
+                       cfg |= ALX_DRV_PHY_1000;
+               if (ethadv_cfg & ADVERTISED_1000baseT_Full)
+                       cfg |= ALX_DRV_PHY_1000 | ALX_DRV_PHY_DUPLEX;
+               if (ethadv_cfg & ADVERTISED_Pause)
+                       cfg |= ADVERTISE_PAUSE_CAP;
+               if (ethadv_cfg & ADVERTISED_Asym_Pause)
+                       cfg |= ADVERTISE_PAUSE_ASYM;
+       } else {
+               switch (ethadv_cfg) {
+               case ADVERTISED_10baseT_Half:
+                       cfg |= ALX_DRV_PHY_10;
+                       break;
+               case ADVERTISED_100baseT_Half:
+                       cfg |= ALX_DRV_PHY_100;
+                       break;
+               case ADVERTISED_10baseT_Full:
+                       cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+                       break;
+               case ADVERTISED_100baseT_Full:
+                       cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+                       break;
+               }
+       }
+
+       return cfg;
+}
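+
+/* Worked example: with the mapping above, an ethtool configuration of
+ * ADVERTISED_Autoneg | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full
+ * becomes ALX_DRV_PHY_AUTO | ALX_DRV_PHY_100 | ALX_DRV_PHY_1000 |
+ * ALX_DRV_PHY_DUPLEX; alx_setup_speed_duplex() below stores that value in
+ * the ALX_DRV register and alx_phy_configured() later compares against it.
+ */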
+
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
+{
+       u16 adv, giga, cr;
+       u32 val;
+       int err = 0;
+
+       alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
+       val = alx_read_mem32(hw, ALX_DRV);
+       ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
+
+       if (ethadv & ADVERTISED_Autoneg) {
+               adv = ADVERTISE_CSMA;
+               adv |= ethtool_adv_to_mii_adv_t(ethadv);
+
+               if (flowctrl & ALX_FC_ANEG) {
+                       if (flowctrl & ALX_FC_RX) {
+                               adv |= ADVERTISED_Pause;
+                               if (!(flowctrl & ALX_FC_TX))
+                                       adv |= ADVERTISED_Asym_Pause;
+                       } else if (flowctrl & ALX_FC_TX) {
+                               adv |= ADVERTISED_Asym_Pause;
+                       }
+               }
+               giga = 0;
+               if (alx_hw_giga(hw))
+                       giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
+
+               cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+
+               if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
+                   alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
+                   alx_write_phy_reg(hw, MII_BMCR, cr))
+                       err = -EBUSY;
+       } else {
+               cr = BMCR_RESET;
+               if (ethadv == ADVERTISED_100baseT_Half ||
+                   ethadv == ADVERTISED_100baseT_Full)
+                       cr |= BMCR_SPEED100;
+               if (ethadv == ADVERTISED_10baseT_Full ||
+                   ethadv == ADVERTISED_100baseT_Full)
+                       cr |= BMCR_FULLDPLX;
+
+               err = alx_write_phy_reg(hw, MII_BMCR, cr);
+       }
+
+       if (!err) {
+               alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
+               val |= ethadv_to_hw_cfg(hw, ethadv);
+       }
+
+       alx_write_mem32(hw, ALX_DRV, val);
+
+       return err;
+}
+
+
+void alx_post_phy_link(struct alx_hw *hw)
+{
+       u16 phy_val, len, agc;
+       u8 revid = alx_hw_revision(hw);
+       bool adj_th = revid == ALX_REV_B0;
+       int speed;
+
+       if (hw->link_speed == SPEED_UNKNOWN)
+               speed = SPEED_UNKNOWN;
+       else
+               speed = hw->link_speed - hw->link_speed % 10;
+
+       if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
+               return;
+
+       /* 1000BT/AZ, wrong cable length */
+       if (speed != SPEED_UNKNOWN) {
+               alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
+                                &phy_val);
+               len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
+               alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
+               agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
+
+               if ((speed == SPEED_1000 &&
+                    (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
+                     (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
+                   (speed == SPEED_100 &&
+                    (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
+                     (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
+                       alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+                                         ALX_AZ_ANADECT_LONG);
+                       alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                        &phy_val);
+                       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                         phy_val | ALX_AFE_10BT_100M_TH);
+               } else {
+                       alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+                                         ALX_AZ_ANADECT_DEF);
+                       alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
+                                        ALX_MIIEXT_AFE, &phy_val);
+                       alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                         phy_val & ~ALX_AFE_10BT_100M_TH);
+               }
+
+               /* threshold adjust */
+               if (adj_th && hw->lnk_patch) {
+                       if (speed == SPEED_100) {
+                               alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+                                                 ALX_MSE16DB_UP);
+                       } else if (speed == SPEED_1000) {
+                               /*
+                                * Gigabit link threshold: raise the noise
+                                * tolerance by 50%
+                                */
+                               alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+                                                &phy_val);
+                               ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+                                             ALX_MSE20DB_TH_HI);
+                               alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+                                                 phy_val);
+                       }
+               }
+       } else {
+               alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                &phy_val);
+               alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+                                 phy_val & ~ALX_AFE_10BT_100M_TH);
+
+               if (adj_th && hw->lnk_patch) {
+                       alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+                                         ALX_MSE16DB_DOWN);
+                       alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
+                       ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+                                     ALX_MSE20DB_TH_DEF);
+                       alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
+               }
+       }
+}
+
+
+/* NOTE:
+ *    1. the PHY link must be established before calling this function
+ *    2. the WoL options (pattern, magic, link, etc.) must be configured
+ *       before calling it
+ */
+int alx_pre_suspend(struct alx_hw *hw, int speed)
+{
+       u32 master, mac, phy, val;
+       int err = 0;
+
+       master = alx_read_mem32(hw, ALX_MASTER);
+       master &= ~ALX_MASTER_PCLKSEL_SRDS;
+       mac = hw->rx_ctrl;
+       /* 10/100 half */
+       ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
+       mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+
+       phy = alx_read_mem32(hw, ALX_PHY_CTRL);
+       phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
+       phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
+              ALX_PHY_CTRL_HIB_EN;
+
+       /* no wake-up activity is configured */
+       if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
+               err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+               if (err)
+                       return err;
+               phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
+       } else {
+               if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
+                       mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
+               if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
+                       mac |= ALX_MAC_CTRL_TX_EN;
+               if (speed % 10 == DUPLEX_FULL)
+                       mac |= ALX_MAC_CTRL_FULLD;
+               if (speed >= SPEED_1000)
+                       ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+                                     ALX_MAC_CTRL_SPEED_1000);
+               phy |= ALX_PHY_CTRL_DSPRST_OUT;
+               err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
+                                       ALX_MIIEXT_S3DIG10,
+                                       ALX_MIIEXT_S3DIG10_SL);
+               if (err)
+                       return err;
+       }
+
+       alx_enable_osc(hw);
+       hw->rx_ctrl = mac;
+       alx_write_mem32(hw, ALX_MASTER, master);
+       alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+       alx_write_mem32(hw, ALX_PHY_CTRL, phy);
+
+       /* set val of PDLL D3PLLOFF */
+       val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+       val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
+       alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
+
+       return 0;
+}
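+
+/* Sketch of the call order implied by the NOTE above (assumed, not copied
+ * from the driver): a suspend/WoL path would roughly do
+ *
+ *      alx_select_powersaving_speed(hw, &speed);
+ *      alx_clear_phy_intr(hw);
+ *      alx_pre_suspend(hw, speed);
+ *      alx_config_wol(hw);
+ *
+ * with the error handling after each step omitted for brevity.
+ */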
+
+bool alx_phy_configured(struct alx_hw *hw)
+{
+       u32 cfg, hw_cfg;
+
+       cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
+       cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
+       hw_cfg = alx_get_phy_config(hw);
+
+       if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
+               return false;
+
+       return cfg == hw_cfg;
+}
+
+int alx_get_phy_link(struct alx_hw *hw, int *speed)
+{
+       struct pci_dev *pdev = hw->pdev;
+       u16 bmsr, giga;
+       int err;
+
+       err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+       if (err)
+               return err;
+
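+       /* the link-status bit in BMSR is latched, so it is read twice;
+        * the second read reflects the current link state
+        */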
+       err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+       if (err)
+               return err;
+
+       if (!(bmsr & BMSR_LSTATUS)) {
+               *speed = SPEED_UNKNOWN;
+               return 0;
+       }
+
+       /* speed/duplex result is saved in PHY Specific Status Register */
+       err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
+       if (err)
+               return err;
+
+       if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
+               goto wrong_speed;
+
+       switch (giga & ALX_GIGA_PSSR_SPEED) {
+       case ALX_GIGA_PSSR_1000MBS:
+               *speed = SPEED_1000;
+               break;
+       case ALX_GIGA_PSSR_100MBS:
+               *speed = SPEED_100;
+               break;
+       case ALX_GIGA_PSSR_10MBS:
+               *speed = SPEED_10;
+               break;
+       default:
+               goto wrong_speed;
+       }
+
+       *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+       return 1;
+
+wrong_speed:
+       dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
+       return -EINVAL;
+}
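+
+/* Note: the speed reported here is encoded as SPEED_* + DUPLEX_* in a
+ * single int (DUPLEX_HALF == 0, DUPLEX_FULL == 1), so 1000 Mb/s full
+ * duplex is SPEED_1000 + DUPLEX_FULL == 1001.  Callers recover the duplex
+ * with "speed % 10" and the base speed with "speed - speed % 10", as in
+ * alx_start_mac() and alx_post_phy_link() above.
+ */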
+
+int alx_clear_phy_intr(struct alx_hw *hw)
+{
+       u16 isr;
+
+       /* clear interrupt status by reading it */
+       return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
+}
+
+int alx_config_wol(struct alx_hw *hw)
+{
+       u32 wol = 0;
+       int err = 0;
+
+       /* turn on magic packet event */
+       if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+               wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
+
+       /* turn on link up event */
+       if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
+               wol |=  ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
+               /* only link up can wake up */
+               err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
+       }
+       alx_write_mem32(hw, ALX_WOL0, wol);
+
+       return err;
+}
+
+void alx_disable_rss(struct alx_hw *hw)
+{
+       u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
+
+       ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
+       alx_write_mem32(hw, ALX_RXQ0, ctrl);
+}
+
+void alx_configure_basic(struct alx_hw *hw)
+{
+       u32 val, raw_mtu, max_payload;
+       u16 val16;
+       u8 chip_rev = alx_hw_revision(hw);
+
+       alx_set_macaddr(hw, hw->mac_addr);
+
+       alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
+
+       /* idle timeout to switch clk_125M */
+       if (chip_rev >= ALX_REV_B0)
+               alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
+                               ALX_IDLE_DECISN_TIMER_DEF);
+
+       alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
+
+       val = alx_read_mem32(hw, ALX_MASTER);
+       val |= ALX_MASTER_IRQMOD2_EN |
+              ALX_MASTER_IRQMOD1_EN |
+              ALX_MASTER_SYSALVTIMER_EN;
+       alx_write_mem32(hw, ALX_MASTER, val);
+       alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
+                       (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
+       /* interrupt re-trigger timeout */
+       alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
+       /* TPD threshold to trigger an interrupt */
+       alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
+       alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
+
+       raw_mtu = hw->mtu + ETH_HLEN;
+       alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
+       if (raw_mtu > ALX_MTU_JUMBO_TH)
+               hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
+
+       if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
+               val = (raw_mtu + 8 + 7) >> 3;
+       else
+               val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
+       alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
+
+       max_payload = pcie_get_readrq(hw->pdev) >> 8;
+       /*
+        * if the BIOS has changed the default DMA read max length,
+        * restore it to the default value
+        */
+       if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
+               pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
+
+       val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
+             ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
+             ALX_TXQ0_SUPT_IPOPT |
+             ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
+       alx_write_mem32(hw, ALX_TXQ0, val);
+       val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
+             ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
+             ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
+             ALX_HQTPD_BURST_EN;
+       alx_write_mem32(hw, ALX_HQTPD, val);
+
+       /* rxq, flow control */
+       val = alx_read_mem32(hw, ALX_SRAM5);
+       val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
+       if (val > ALX_SRAM_RXF_LEN_8K) {
+               val16 = ALX_MTU_STD_ALGN >> 3;
+               val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
+       } else {
+               val16 = ALX_MTU_STD_ALGN >> 3;
+               val = (val - ALX_MTU_STD_ALGN) >> 3;
+       }
+       alx_write_mem32(hw, ALX_RXQ2,
+                       val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
+                       val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
+       val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
+             ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
+             ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
+             ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
+             ALX_RXQ0_IPV6_PARSE_EN;
+
+       if (alx_hw_giga(hw))
+               ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
+                             ALX_RXQ0_ASPM_THRESH_100M);
+
+       alx_write_mem32(hw, ALX_RXQ0, val);
+
+       val = alx_read_mem32(hw, ALX_DMA);
+       val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
+             ALX_DMA_RREQ_PRI_DATA |
+             max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
+             ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
+             ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
+             (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
+       alx_write_mem32(hw, ALX_DMA, val);
+
+       /* default multi-tx-q weights */
+       val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
+             4 << ALX_WRR_PRI0_SHIFT |
+             4 << ALX_WRR_PRI1_SHIFT |
+             4 << ALX_WRR_PRI2_SHIFT |
+             4 << ALX_WRR_PRI3_SHIFT;
+       alx_write_mem32(hw, ALX_WRR, val);
+}
+
+static inline u32 alx_speed_to_ethadv(int speed)
+{
+       switch (speed) {
+       case SPEED_1000 + DUPLEX_FULL:
+               return ADVERTISED_1000baseT_Full;
+       case SPEED_100 + DUPLEX_FULL:
+               return ADVERTISED_100baseT_Full;
+       case SPEED_100 + DUPLEX_HALF:
+               return ADVERTISED_100baseT_Half;
+       case SPEED_10 + DUPLEX_FULL:
+               return ADVERTISED_10baseT_Full;
+       case SPEED_10 + DUPLEX_HALF:
+               return ADVERTISED_10baseT_Half;
+       default:
+               return 0;
+       }
+}
+
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
+{
+       int i, err, spd;
+       u16 lpa;
+
+       err = alx_get_phy_link(hw, &spd);
+       if (err < 0)
+               return err;
+
+       if (spd == SPEED_UNKNOWN)
+               return 0;
+
+       err = alx_read_phy_reg(hw, MII_LPA, &lpa);
+       if (err)
+               return err;
+
+       if (!(lpa & LPA_LPACK)) {
+               *speed = spd;
+               return 0;
+       }
+
+       if (lpa & LPA_10FULL)
+               *speed = SPEED_10 + DUPLEX_FULL;
+       else if (lpa & LPA_10HALF)
+               *speed = SPEED_10 + DUPLEX_HALF;
+       else if (lpa & LPA_100FULL)
+               *speed = SPEED_100 + DUPLEX_FULL;
+       else
+               *speed = SPEED_100 + DUPLEX_HALF;
+
+       if (*speed != spd) {
+               err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+               if (err)
+                       return err;
+               err = alx_setup_speed_duplex(hw,
+                                            alx_speed_to_ethadv(*speed) |
+                                            ADVERTISED_Autoneg,
+                                            ALX_FC_ANEG | ALX_FC_RX |
+                                            ALX_FC_TX);
+               if (err)
+                       return err;
+
+               /* wait for linkup */
+               for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
+                       int speed2;
+
+                       msleep(100);
+
+                       err = alx_get_phy_link(hw, &speed2);
+                       if (err < 0)
+                               return err;
+                       if (speed2 != SPEED_UNKNOWN)
+                               break;
+               }
+               if (i == ALX_MAX_SETUP_LNK_CYCLE)
+                       return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+bool alx_get_phy_info(struct alx_hw *hw)
+{
+       u16  devs1, devs2;
+
+       if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
+           alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
+               return false;
+
+       /* since we don't have a PMA/PMD status2 register, we can't
+        * use the mdio45_probe function for prtad and mmds;
+        * use a fixed MMD3 to get the mmds.
+        */
+       if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
+           alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
+               return false;
+       hw->mdio.mmds = devs1 | devs2 << 16;
+
+       return true;
+}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644 (file)
index 0000000..65e723d
--- /dev/null
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_HW_H_
+#define ALX_HW_H_
+#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/pci.h>
+#include "reg.h"
+
+/* Transmit Packet Descriptor, contains 4 32-bit words.
+ *
+ *   31               16               0
+ *   +----------------+----------------+
+ *   |    vlan-tag    |   buf length   |
+ *   +----------------+----------------+
+ *   |              Word 1             |
+ *   +----------------+----------------+
+ *   |      Word 2: buf addr lo        |
+ *   +----------------+----------------+
+ *   |      Word 3: buf addr hi        |
+ *   +----------------+----------------+
+ *
+ * Word 2 and 3 combine to form a 64-bit buffer address
+ *
+ * Word 1 has three forms, depending on the state of bits 8/12/13:
+ * if bit8 == '1', the layout is only for custom checksum offload.
+ * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
+ *     of the skb is special for LSO V2: Word 2 becomes the total skb
+ *     length and Word 3 is meaningless.
+ * otherwise, the layout is for a general skb, an ip/tcp/udp checksum,
+ *     or LSO (TSO) offload.
+ *
+ * Here is the depiction:
+ *
+ *   0-+                                  0-+
+ *   1 |                                  1 |
+ *   2 |                                  2 |
+ *   3 |    Payload offset                3 |    L4 header offset
+ *   4 |        (7:0)                     4 |        (7:0)
+ *   5 |                                  5 |
+ *   6 |                                  6 |
+ *   7-+                                  7-+
+ *   8      Custom csum enable = 1        8      Custom csum enable = 0
+ *   9      General IPv4 checksum         9      General IPv4 checksum
+ *   10     General TCP checksum          10     General TCP checksum
+ *   11     General UDP checksum          11     General UDP checksum
+ *   12     Large Send Segment enable     12     Large Send Segment enable
+ *   13     Large Send Segment type       13     Large Send Segment type
+ *   14     VLAN tagged                   14     VLAN tagged
+ *   15     Insert VLAN tag               15     Insert VLAN tag
+ *   16     IPv4 packet                   16     IPv4 packet
+ *   17     Ethernet frame type           17     Ethernet frame type
+ *   18-+                                 18-+
+ *   19 |                                 19 |
+ *   20 |                                 20 |
+ *   21 |   Custom csum offset            21 |
+ *   22 |       (25:18)                   22 |
+ *   23 |                                 23 |   MSS (30:18)
+ *   24 |                                 24 |
+ *   25-+                                 25 |
+ *   26-+                                 26 |
+ *   27 |                                 27 |
+ *   28 |   Reserved                      28 |
+ *   29 |                                 29 |
+ *   30-+                                 30-+
+ *   31     End of packet                 31     End of packet
+ */
+struct alx_txd {
+       __le16 len;
+       __le16 vlan_tag;
+       __le32 word1;
+       union {
+               __le64 addr;
+               struct {
+                       __le32 pkt_len;
+                       __le32 resvd;
+               } l;
+       } adrl;
+} __packed;
+
+/* tpd word 1 */
+#define TPD_CXSUMSTART_MASK            0x00FF
+#define TPD_CXSUMSTART_SHIFT           0
+#define TPD_L4HDROFFSET_MASK           0x00FF
+#define TPD_L4HDROFFSET_SHIFT          0
+#define TPD_CXSUM_EN_MASK              0x0001
+#define TPD_CXSUM_EN_SHIFT             8
+#define TPD_IP_XSUM_MASK               0x0001
+#define TPD_IP_XSUM_SHIFT              9
+#define TPD_TCP_XSUM_MASK              0x0001
+#define TPD_TCP_XSUM_SHIFT             10
+#define TPD_UDP_XSUM_MASK              0x0001
+#define TPD_UDP_XSUM_SHIFT             11
+#define TPD_LSO_EN_MASK                        0x0001
+#define TPD_LSO_EN_SHIFT               12
+#define TPD_LSO_V2_MASK                        0x0001
+#define TPD_LSO_V2_SHIFT               13
+#define TPD_VLTAGGED_MASK              0x0001
+#define TPD_VLTAGGED_SHIFT             14
+#define TPD_INS_VLTAG_MASK             0x0001
+#define TPD_INS_VLTAG_SHIFT            15
+#define TPD_IPV4_MASK                  0x0001
+#define TPD_IPV4_SHIFT                 16
+#define TPD_ETHTYPE_MASK               0x0001
+#define TPD_ETHTYPE_SHIFT              17
+#define TPD_CXSUMOFFSET_MASK           0x00FF
+#define TPD_CXSUMOFFSET_SHIFT          18
+#define TPD_MSS_MASK                   0x1FFF
+#define TPD_MSS_SHIFT                  18
+#define TPD_EOP_MASK                   0x0001
+#define TPD_EOP_SHIFT                  31
+
+#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
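+
+/* Usage sketch (hypothetical, not from the driver): DESC_GET pastes
+ * "SHIFT"/"MASK" directly onto the field name, so callers pass the prefix
+ * including the trailing underscore, e.g.
+ *
+ *      u32 w1 = le32_to_cpu(tpd->word1);
+ *      u32 mss = DESC_GET(w1, TPD_MSS_);
+ *      u32 eop = DESC_GET(w1, TPD_EOP_);
+ */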
+
+/* Receive Free Descriptor */
+struct alx_rfd {
+       __le64 addr;            /* data buffer address; the length is
+                                * set in a register --- every buffer
+                                * has the same size
+                                */
+} __packed;
+
+/* Receive Return Descriptor, contains 4 32-bit words.
+ *
+ *   31               16               0
+ *   +----------------+----------------+
+ *   |              Word 0             |
+ *   +----------------+----------------+
+ *   |     Word 1: RSS Hash value      |
+ *   +----------------+----------------+
+ *   |              Word 2             |
+ *   +----------------+----------------+
+ *   |              Word 3             |
+ *   +----------------+----------------+
+ *
+ * Word 0 depiction         &            Word 2 depiction:
+ *
+ *   0--+                                 0--+
+ *   1  |                                 1  |
+ *   2  |                                 2  |
+ *   3  |                                 3  |
+ *   4  |                                 4  |
+ *   5  |                                 5  |
+ *   6  |                                 6  |
+ *   7  |    IP payload checksum          7  |     VLAN tag
+ *   8  |         (15:0)                  8  |      (15:0)
+ *   9  |                                 9  |
+ *   10 |                                 10 |
+ *   11 |                                 11 |
+ *   12 |                                 12 |
+ *   13 |                                 13 |
+ *   14 |                                 14 |
+ *   15-+                                 15-+
+ *   16-+                                 16-+
+ *   17 |     Number of RFDs              17 |
+ *   18 |        (19:16)                  18 |
+ *   19-+                                 19 |     Protocol ID
+ *   20-+                                 20 |      (23:16)
+ *   21 |                                 21 |
+ *   22 |                                 22 |
+ *   23 |                                 23-+
+ *   24 |                                 24 |     Reserved
+ *   25 |     Start index of RFD-ring     25-+
+ *   26 |         (31:20)                 26 |     RSS Q-num (27:25)
+ *   27 |                                 27-+
+ *   28 |                                 28-+
+ *   29 |                                 29 |     RSS Hash algorithm
+ *   30 |                                 30 |      (31:28)
+ *   31-+                                 31-+
+ *
+ * Word 3 depiction:
+ *
+ *   0--+
+ *   1  |
+ *   2  |
+ *   3  |
+ *   4  |
+ *   5  |
+ *   6  |
+ *   7  |    Packet length (include FCS)
+ *   8  |         (13:0)
+ *   9  |
+ *   10 |
+ *   11 |
+ *   12 |
+ *   13-+
+ *   14      L4 Header checksum error
+ *   15      IPv4 checksum error
+ *   16      VLAN tagged
+ *   17-+
+ *   18 |    Protocol ID (19:17)
+ *   19-+
+ *   20      Receive error summary
+ *   21      FCS(CRC) error
+ *   22      Frame alignment error
+ *   23      Truncated packet
+ *   24      Runt packet
+ *   25      Incomplete packet due to insufficient rx-desc
+ *   26      Broadcast packet
+ *   27      Multicast packet
+ *   28      Ethernet type (EII or 802.3)
+ *   29      FIFO overflow
+ *   30      Length error (for 802.3, length field mismatch with actual len)
+ *   31      Updated: indicates to the driver that this RRD has been refreshed.
+ */
+struct alx_rrd {
+       __le32 word0;
+       __le32 rss_hash;
+       __le32 word2;
+       __le32 word3;
+} __packed;
+
+/* rrd word 0 */
+#define RRD_XSUM_MASK          0xFFFF
+#define RRD_XSUM_SHIFT         0
+#define RRD_NOR_MASK           0x000F
+#define RRD_NOR_SHIFT          16
+#define RRD_SI_MASK            0x0FFF
+#define RRD_SI_SHIFT           20
+
+/* rrd word 2 */
+#define RRD_VLTAG_MASK         0xFFFF
+#define RRD_VLTAG_SHIFT                0
+#define RRD_PID_MASK           0x00FF
+#define RRD_PID_SHIFT          16
+/* non-ip packet */
+#define RRD_PID_NONIP          0
+/* ipv4(only) */
+#define RRD_PID_IPV4           1
+/* tcp/ipv6 */
+#define RRD_PID_IPV6TCP                2
+/* tcp/ipv4 */
+#define RRD_PID_IPV4TCP                3
+/* udp/ipv6 */
+#define RRD_PID_IPV6UDP                4
+/* udp/ipv4 */
+#define RRD_PID_IPV4UDP                5
+/* ipv6(only) */
+#define RRD_PID_IPV6           6
+/* LLDP packet */
+#define RRD_PID_LLDP           7
+/* 1588 packet */
+#define RRD_PID_1588           8
+#define RRD_RSSQ_MASK          0x0007
+#define RRD_RSSQ_SHIFT         25
+#define RRD_RSSALG_MASK                0x000F
+#define RRD_RSSALG_SHIFT       28
+#define RRD_RSSALG_TCPV6       0x1
+#define RRD_RSSALG_IPV6                0x2
+#define RRD_RSSALG_TCPV4       0x4
+#define RRD_RSSALG_IPV4                0x8
+
+/* rrd word 3 */
+#define RRD_PKTLEN_MASK                0x3FFF
+#define RRD_PKTLEN_SHIFT       0
+#define RRD_ERR_L4_MASK                0x0001
+#define RRD_ERR_L4_SHIFT       14
+#define RRD_ERR_IPV4_MASK      0x0001
+#define RRD_ERR_IPV4_SHIFT     15
+#define RRD_VLTAGGED_MASK      0x0001
+#define RRD_VLTAGGED_SHIFT     16
+#define RRD_OLD_PID_MASK       0x0007
+#define RRD_OLD_PID_SHIFT      17
+#define RRD_ERR_RES_MASK       0x0001
+#define RRD_ERR_RES_SHIFT      20
+#define RRD_ERR_FCS_MASK       0x0001
+#define RRD_ERR_FCS_SHIFT      21
+#define RRD_ERR_FAE_MASK       0x0001
+#define RRD_ERR_FAE_SHIFT      22
+#define RRD_ERR_TRUNC_MASK     0x0001
+#define RRD_ERR_TRUNC_SHIFT    23
+#define RRD_ERR_RUNT_MASK      0x0001
+#define RRD_ERR_RUNT_SHIFT     24
+#define RRD_ERR_ICMP_MASK      0x0001
+#define RRD_ERR_ICMP_SHIFT     25
+#define RRD_BCAST_MASK         0x0001
+#define RRD_BCAST_SHIFT                26
+#define RRD_MCAST_MASK         0x0001
+#define RRD_MCAST_SHIFT                27
+#define RRD_ETHTYPE_MASK       0x0001
+#define RRD_ETHTYPE_SHIFT      28
+#define RRD_ERR_FIFOV_MASK     0x0001
+#define RRD_ERR_FIFOV_SHIFT    29
+#define RRD_ERR_LEN_MASK       0x0001
+#define RRD_ERR_LEN_SHIFT      30
+#define RRD_UPDATED_MASK       0x0001
+#define RRD_UPDATED_SHIFT      31
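+
+/* Usage sketch (hypothetical, not from the driver): a receive path would
+ * typically wait for the Updated bit and then decode word3 with
+ * ALX_GET_FIELD (defined below); the reported length includes the FCS:
+ *
+ *      u32 w3 = le32_to_cpu(rrd->word3);
+ *
+ *      if (ALX_GET_FIELD(w3, RRD_UPDATED)) {
+ *              unsigned int len = ALX_GET_FIELD(w3, RRD_PKTLEN);
+ *              bool rx_err = ALX_GET_FIELD(w3, RRD_ERR_RES);
+ *      }
+ */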
+
+
+#define ALX_MAX_SETUP_LNK_CYCLE        50
+
+/* for FlowControl */
+#define ALX_FC_RX              0x01
+#define ALX_FC_TX              0x02
+#define ALX_FC_ANEG            0x04
+
+/* for sleep control */
+#define ALX_SLEEP_WOL_PHY      0x00000001
+#define ALX_SLEEP_WOL_MAGIC    0x00000002
+#define ALX_SLEEP_CIFS         0x00000004
+#define ALX_SLEEP_ACTIVE       (ALX_SLEEP_WOL_PHY | \
+                                ALX_SLEEP_WOL_MAGIC | \
+                                ALX_SLEEP_CIFS)
+
+/* for RSS hash type */
+#define ALX_RSS_HASH_TYPE_IPV4         0x1
+#define ALX_RSS_HASH_TYPE_IPV4_TCP     0x2
+#define ALX_RSS_HASH_TYPE_IPV6         0x4
+#define ALX_RSS_HASH_TYPE_IPV6_TCP     0x8
+#define ALX_RSS_HASH_TYPE_ALL          (ALX_RSS_HASH_TYPE_IPV4 | \
+                                        ALX_RSS_HASH_TYPE_IPV4_TCP | \
+                                        ALX_RSS_HASH_TYPE_IPV6 | \
+                                        ALX_RSS_HASH_TYPE_IPV6_TCP)
+#define ALX_DEF_RXBUF_SIZE     1536
+#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
+#define ALX_MAX_TSO_PKT_SIZE   (7*1024)
+#define ALX_MAX_FRAME_SIZE     ALX_MAX_JUMBO_PKT_SIZE
+#define ALX_MIN_FRAME_SIZE     68
+#define ALX_RAW_MTU(_mtu)      (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+#define ALX_MAX_RX_QUEUES      8
+#define ALX_MAX_TX_QUEUES      4
+#define ALX_MAX_HANDLED_INTRS  5
+
+#define ALX_ISR_MISC           (ALX_ISR_PCIE_LNKDOWN | \
+                                ALX_ISR_DMAW | \
+                                ALX_ISR_DMAR | \
+                                ALX_ISR_SMB | \
+                                ALX_ISR_MANU | \
+                                ALX_ISR_TIMER)
+
+#define ALX_ISR_FATAL          (ALX_ISR_PCIE_LNKDOWN | \
+                                ALX_ISR_DMAW | ALX_ISR_DMAR)
+
+#define ALX_ISR_ALERT          (ALX_ISR_RXF_OV | \
+                                ALX_ISR_TXF_UR | \
+                                ALX_ISR_RFD_UR)
+
+#define ALX_ISR_ALL_QUEUES     (ALX_ISR_TX_Q0 | \
+                                ALX_ISR_TX_Q1 | \
+                                ALX_ISR_TX_Q2 | \
+                                ALX_ISR_TX_Q3 | \
+                                ALX_ISR_RX_Q0 | \
+                                ALX_ISR_RX_Q1 | \
+                                ALX_ISR_RX_Q2 | \
+                                ALX_ISR_RX_Q3 | \
+                                ALX_ISR_RX_Q4 | \
+                                ALX_ISR_RX_Q5 | \
+                                ALX_ISR_RX_Q6 | \
+                                ALX_ISR_RX_Q7)
+
+/* maximum interrupt vectors for msix */
+#define ALX_MAX_MSIX_INTRS     16
+
+#define ALX_GET_FIELD(_data, _field)                                   \
+       (((_data) >> _field ## _SHIFT) & _field ## _MASK)
+
+#define ALX_SET_FIELD(_data, _field, _value)   do {                    \
+               (_data) &= ~(_field ## _MASK << _field ## _SHIFT);      \
+               (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
+       } while (0)
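+
+/* Usage sketch: both macros paste "_SHIFT"/"_MASK" onto the field name, so
+ * callers pass the bare prefix, as alx_start_mac() does:
+ *
+ *      u32 mac = alx_read_mem32(hw, ALX_MAC_CTRL);
+ *
+ *      ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_1000);
+ *      alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+ *      speed_field = ALX_GET_FIELD(mac, ALX_MAC_CTRL_SPEED);
+ */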
+
+struct alx_hw {
+       struct pci_dev *pdev;
+       u8 __iomem *hw_addr;
+
+       /* current & permanent mac addr */
+       u8 mac_addr[ETH_ALEN];
+       u8 perm_addr[ETH_ALEN];
+
+       u16 mtu;
+       u16 imt;
+       u8 dma_chnl;
+       u8 max_dma_chnl;
+       /* TPD threshold to trigger an interrupt */
+       u32 ith_tpd;
+       u32 rx_ctrl;
+       u32 mc_hash[2];
+
+       u32 smb_timer;
+       /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
+       int link_speed;
+
+       /* auto-neg advertisement or force mode config */
+       u32 adv_cfg;
+       u8 flowctrl;
+
+       u32 sleep_ctrl;
+
+       spinlock_t mdio_lock;
+       struct mdio_if_info mdio;
+       u16 phy_id[2];
+
+       /* PHY link patch flag */
+       bool lnk_patch;
+};
+
+static inline int alx_hw_revision(struct alx_hw *hw)
+{
+       return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
+}
+
+static inline bool alx_hw_with_cr(struct alx_hw *hw)
+{
+       return hw->pdev->revision & 1;
+}
+
+static inline bool alx_hw_giga(struct alx_hw *hw)
+{
+       return hw->pdev->device & 1;
+}
+
+static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
+{
+       writeb(val, hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
+{
+       writew(val, hw->hw_addr + reg);
+}
+
+static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
+{
+       return readw(hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
+{
+       writel(val, hw->hw_addr + reg);
+}
+
+static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
+{
+       return readl(hw->hw_addr + reg);
+}
+
+static inline void alx_post_write(struct alx_hw *hw)
+{
+       readl(hw->hw_addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
+void alx_reset_phy(struct alx_hw *hw);
+void alx_reset_pcie(struct alx_hw *hw);
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
+void alx_post_phy_link(struct alx_hw *hw);
+int alx_pre_suspend(struct alx_hw *hw, int speed);
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
+int alx_get_phy_link(struct alx_hw *hw, int *speed);
+int alx_clear_phy_intr(struct alx_hw *hw);
+int alx_config_wol(struct alx_hw *hw);
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
+void alx_start_mac(struct alx_hw *hw);
+int alx_reset_mac(struct alx_hw *hw);
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
+bool alx_phy_configured(struct alx_hw *hw);
+void alx_configure_basic(struct alx_hw *hw);
+void alx_disable_rss(struct alx_hw *hw);
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
+bool alx_get_phy_info(struct alx_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644 (file)
index 0000000..418de8b
--- /dev/null
@@ -0,0 +1,1625 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/ip6_checksum.h>
+#include <linux/crc32.h>
+#include "alx.h"
+#include "hw.h"
+#include "reg.h"
+
+const char alx_drv_name[] = "alx";
+
+
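+/* unmap and free the skb attached to a single TX ring entry */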
+static void alx_free_txbuf(struct alx_priv *alx, int entry)
+{
+       struct alx_buffer *txb = &alx->txq.bufs[entry];
+
+       if (dma_unmap_len(txb, size)) {
+               dma_unmap_single(&alx->hw.pdev->dev,
+                                dma_unmap_addr(txb, dma),
+                                dma_unmap_len(txb, size),
+                                DMA_TO_DEVICE);
+               dma_unmap_len_set(txb, size, 0);
+       }
+
+       if (txb->skb) {
+               dev_kfree_skb_any(txb->skb);
+               txb->skb = NULL;
+       }
+}
+
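+/* Refill the RX free-descriptor ring with freshly allocated skbs, starting
+ * at the software write index and stopping when the ring is full or an
+ * allocation fails.  Returns the number of descriptors refilled.
+ */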
+static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
+{
+       struct alx_rx_queue *rxq = &alx->rxq;
+       struct sk_buff *skb;
+       struct alx_buffer *cur_buf;
+       dma_addr_t dma;
+       u16 cur, next, count = 0;
+
+       next = cur = rxq->write_idx;
+       if (++next == alx->rx_ringsz)
+               next = 0;
+       cur_buf = &rxq->bufs[cur];
+
+       while (!cur_buf->skb && next != rxq->read_idx) {
+               struct alx_rfd *rfd = &rxq->rfd[cur];
+
+               skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+               if (!skb)
+                       break;
+               dma = dma_map_single(&alx->hw.pdev->dev,
+                                    skb->data, alx->rxbuf_size,
+                                    DMA_FROM_DEVICE);
+               if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
+                       dev_kfree_skb(skb);
+                       break;
+               }
+
+               /* Unfortunately, RX descriptor buffers must be 4-byte
+                * aligned, so we can't use IP alignment.
+                */
+               if (WARN_ON(dma & 3)) {
+                       dev_kfree_skb(skb);
+                       break;
+               }
+
+               cur_buf->skb = skb;
+               dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
+               dma_unmap_addr_set(cur_buf, dma, dma);
+               rfd->addr = cpu_to_le64(dma);
+
+               cur = next;
+               if (++next == alx->rx_ringsz)
+                       next = 0;
+               cur_buf = &rxq->bufs[cur];
+               count++;
+       }
+
+       if (count) {
+               /* flush all updates before updating hardware */
+               wmb();
+               rxq->write_idx = cur;
+               alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
+       }
+
+       return count;
+}
+
+static inline int alx_tpd_avail(struct alx_priv *alx)
+{
+       struct alx_tx_queue *txq = &alx->txq;
+
+       if (txq->write_idx >= txq->read_idx)
+               return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
+       return txq->read_idx - txq->write_idx - 1;
+}
+
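+/* Reclaim TX buffers up to the hardware consumer index and wake the queue
+ * once enough descriptors are free.  Returns true when the software read
+ * index has caught up with the hardware.
+ */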
+static bool alx_clean_tx_irq(struct alx_priv *alx)
+{
+       struct alx_tx_queue *txq = &alx->txq;
+       u16 hw_read_idx, sw_read_idx;
+       unsigned int total_bytes = 0, total_packets = 0;
+       int budget = ALX_DEFAULT_TX_WORK;
+
+       sw_read_idx = txq->read_idx;
+       hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
+
+       if (sw_read_idx != hw_read_idx) {
+               while (sw_read_idx != hw_read_idx && budget > 0) {
+                       struct sk_buff *skb;
+
+                       skb = txq->bufs[sw_read_idx].skb;
+                       if (skb) {
+                               total_bytes += skb->len;
+                               total_packets++;
+                               budget--;
+                       }
+
+                       alx_free_txbuf(alx, sw_read_idx);
+
+                       if (++sw_read_idx == alx->tx_ringsz)
+                               sw_read_idx = 0;
+               }
+               txq->read_idx = sw_read_idx;
+
+               netdev_completed_queue(alx->dev, total_packets, total_bytes);
+       }
+
+       if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
+           alx_tpd_avail(alx) > alx->tx_ringsz/4)
+               netif_wake_queue(alx->dev);
+
+       return sw_read_idx == hw_read_idx;
+}
+
+static void alx_schedule_link_check(struct alx_priv *alx)
+{
+       schedule_work(&alx->link_check_wk);
+}
+
+static void alx_schedule_reset(struct alx_priv *alx)
+{
+       schedule_work(&alx->reset_wk);
+}
+
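+/* Process completed RX return descriptors, handing good packets to GRO and
+ * scheduling a reset if the descriptors look inconsistent.  Returns true if
+ * the budget was not exhausted.
+ */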
+static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+{
+       struct alx_rx_queue *rxq = &alx->rxq;
+       struct alx_rrd *rrd;
+       struct alx_buffer *rxb;
+       struct sk_buff *skb;
+       u16 length, rfd_cleaned = 0;
+
+       while (budget > 0) {
+               rrd = &rxq->rrd[rxq->rrd_read_idx];
+               if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+                       break;
+               rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
+
+               if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+                                 RRD_SI) != rxq->read_idx ||
+                   ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+                                 RRD_NOR) != 1) {
+                       alx_schedule_reset(alx);
+                       return 0;
+               }
+
+               rxb = &rxq->bufs[rxq->read_idx];
+               dma_unmap_single(&alx->hw.pdev->dev,
+                                dma_unmap_addr(rxb, dma),
+                                dma_unmap_len(rxb, size),
+                                DMA_FROM_DEVICE);
+               dma_unmap_len_set(rxb, size, 0);
+               skb = rxb->skb;
+               rxb->skb = NULL;
+
+               if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
+                   rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
+                       rrd->word3 = 0;
+                       dev_kfree_skb_any(skb);
+                       goto next_pkt;
+               }
+
+               length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
+                                      RRD_PKTLEN) - ETH_FCS_LEN;
+               skb_put(skb, length);
+               skb->protocol = eth_type_trans(skb, alx->dev);
+
+               skb_checksum_none_assert(skb);
+               if (alx->dev->features & NETIF_F_RXCSUM &&
+                   !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
+                                   cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
+                       switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
+                                             RRD_PID)) {
+                       case RRD_PID_IPV6UDP:
+                       case RRD_PID_IPV4UDP:
+                       case RRD_PID_IPV4TCP:
+                       case RRD_PID_IPV6TCP:
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               break;
+                       }
+               }
+
+               napi_gro_receive(&alx->napi, skb);
+               budget--;
+
+next_pkt:
+               if (++rxq->read_idx == alx->rx_ringsz)
+                       rxq->read_idx = 0;
+               if (++rxq->rrd_read_idx == alx->rx_ringsz)
+                       rxq->rrd_read_idx = 0;
+
+               if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
+                       rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
+       }
+
+       if (rfd_cleaned)
+               alx_refill_rx_ring(alx, GFP_ATOMIC);
+
+       return budget > 0;
+}
+
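+/* NAPI poll: clean both rings and re-enable the queue interrupts only once
+ * TX and RX are fully serviced.
+ */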
+static int alx_poll(struct napi_struct *napi, int budget)
+{
+       struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+       struct alx_hw *hw = &alx->hw;
+       bool complete = true;
+       unsigned long flags;
+
+       complete = alx_clean_tx_irq(alx) &&
+                  alx_clean_rx_irq(alx, budget);
+
+       if (!complete)
+               return 1;
+
+       napi_complete(&alx->napi);
+
+       /* enable interrupt */
+       spin_lock_irqsave(&alx->irq_lock, flags);
+       alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+       alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+       spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+       alx_post_write(hw);
+
+       return 0;
+}
+
+static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+{
+       struct alx_hw *hw = &alx->hw;
+       bool write_int_mask = false;
+
+       spin_lock(&alx->irq_lock);
+
+       /* ACK interrupt */
+       alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
+       intr &= alx->int_mask;
+
+       if (intr & ALX_ISR_FATAL) {
+               netif_warn(alx, hw, alx->dev,
+                          "fatal interrupt 0x%x, resetting\n", intr);
+               alx_schedule_reset(alx);
+               goto out;
+       }
+
+       if (intr & ALX_ISR_ALERT)
+               netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
+
+       if (intr & ALX_ISR_PHY) {
+               /* Mask the PHY interrupt: its source is internal to the
+                * PHY, so the top-level interrupt status can only be
+                * cleared once the link-check worker has cleared the
+                * PHY's internal status.
+                */
+               alx->int_mask &= ~ALX_ISR_PHY;
+               write_int_mask = true;
+               alx_schedule_link_check(alx);
+       }
+
+       if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
+               napi_schedule(&alx->napi);
+               /* mask rx/tx interrupt, enable them when napi complete */
+               alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+               write_int_mask = true;
+       }
+
+       if (write_int_mask)
+               alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+
+       alx_write_mem32(hw, ALX_ISR, 0);
+
+ out:
+       spin_unlock(&alx->irq_lock);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t alx_intr_msi(int irq, void *data)
+{
+       struct alx_priv *alx = data;
+
+       return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
+}
+
+static irqreturn_t alx_intr_legacy(int irq, void *data)
+{
+       struct alx_priv *alx = data;
+       struct alx_hw *hw = &alx->hw;
+       u32 intr;
+
+       intr = alx_read_mem32(hw, ALX_ISR);
+
+       if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
+               return IRQ_NONE;
+
+       return alx_intr_handle(alx, intr);
+}
+
+static void alx_init_ring_ptrs(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+       u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
+
+       alx->rxq.read_idx = 0;
+       alx->rxq.write_idx = 0;
+       alx->rxq.rrd_read_idx = 0;
+       alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
+       alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
+       alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
+       alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
+       alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
+       alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
+
+       alx->txq.read_idx = 0;
+       alx->txq.write_idx = 0;
+       alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
+       alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
+       alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
+
+       /* load these pointers into the chip */
+       alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
+}
+
+static void alx_free_txring_buf(struct alx_priv *alx)
+{
+       struct alx_tx_queue *txq = &alx->txq;
+       int i;
+
+       if (!txq->bufs)
+               return;
+
+       for (i = 0; i < alx->tx_ringsz; i++)
+               alx_free_txbuf(alx, i);
+
+       memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
+       memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
+       txq->write_idx = 0;
+       txq->read_idx = 0;
+
+       netdev_reset_queue(alx->dev);
+}
+
+static void alx_free_rxring_buf(struct alx_priv *alx)
+{
+       struct alx_rx_queue *rxq = &alx->rxq;
+       struct alx_buffer *cur_buf;
+       u16 i;
+
+       if (rxq == NULL)
+               return;
+
+       for (i = 0; i < alx->rx_ringsz; i++) {
+               cur_buf = rxq->bufs + i;
+               if (cur_buf->skb) {
+                       dma_unmap_single(&alx->hw.pdev->dev,
+                                        dma_unmap_addr(cur_buf, dma),
+                                        dma_unmap_len(cur_buf, size),
+                                        DMA_FROM_DEVICE);
+                       dev_kfree_skb(cur_buf->skb);
+                       cur_buf->skb = NULL;
+                       dma_unmap_len_set(cur_buf, size, 0);
+                       dma_unmap_addr_set(cur_buf, dma, 0);
+               }
+       }
+
+       rxq->write_idx = 0;
+       rxq->read_idx = 0;
+       rxq->rrd_read_idx = 0;
+}
+
+static void alx_free_buffers(struct alx_priv *alx)
+{
+       alx_free_txring_buf(alx);
+       alx_free_rxring_buf(alx);
+}
+
+static int alx_reinit_rings(struct alx_priv *alx)
+{
+       alx_free_buffers(alx);
+
+       alx_init_ring_ptrs(alx);
+
+       if (!alx_refill_rx_ring(alx, GFP_KERNEL))
+               return -ENOMEM;
+
+       return 0;
+}
+
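+/* hash a multicast address into the two 32-bit hash-table registers using
+ * the top bits of the Ethernet CRC
+ */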
+static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
+{
+       u32 crc32, bit, reg;
+
+       crc32 = ether_crc(ETH_ALEN, addr);
+       reg = (crc32 >> 31) & 0x1;
+       bit = (crc32 >> 26) & 0x1F;
+
+       mc_hash[reg] |= BIT(bit);
+}
+
+static void __alx_set_rx_mode(struct net_device *netdev)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       struct netdev_hw_addr *ha;
+       u32 mc_hash[2] = {};
+
+       if (!(netdev->flags & IFF_ALLMULTI)) {
+               netdev_for_each_mc_addr(ha, netdev)
+                       alx_add_mc_addr(hw, ha->addr, mc_hash);
+
+               alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
+               alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
+       }
+
+       hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
+       if (netdev->flags & IFF_PROMISC)
+               hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
+       if (netdev->flags & IFF_ALLMULTI)
+               hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
+
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_set_rx_mode(struct net_device *netdev)
+{
+       __alx_set_rx_mode(netdev);
+}
+
+static int alx_set_mac_address(struct net_device *netdev, void *data)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       struct sockaddr *addr = data;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       if (netdev->addr_assign_type & NET_ADDR_RANDOM)
+               netdev->addr_assign_type ^= NET_ADDR_RANDOM;
+
+       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+       memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+       alx_set_macaddr(hw, hw->mac_addr);
+
+       return 0;
+}
+
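+/* Allocate the software buffer arrays and one DMA-coherent block holding the
+ * TPD, RRD and RFD rings back to back.
+ */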
+static int alx_alloc_descriptors(struct alx_priv *alx)
+{
+       alx->txq.bufs = kcalloc(alx->tx_ringsz,
+                               sizeof(struct alx_buffer),
+                               GFP_KERNEL);
+       if (!alx->txq.bufs)
+               return -ENOMEM;
+
+       alx->rxq.bufs = kcalloc(alx->rx_ringsz,
+                               sizeof(struct alx_buffer),
+                               GFP_KERNEL);
+       if (!alx->rxq.bufs)
+               goto out_free;
+
+       /* physical tx/rx ring descriptors
+        *
+        * Allocate them as a single chunk because they must not cross a
+        * 4G boundary (hardware has a single register for high 32 bits
+        * of addresses only)
+        */
+       alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
+                           sizeof(struct alx_rrd) * alx->rx_ringsz +
+                           sizeof(struct alx_rfd) * alx->rx_ringsz;
+       alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
+                                               alx->descmem.size,
+                                               &alx->descmem.dma,
+                                               GFP_KERNEL);
+       if (!alx->descmem.virt)
+               goto out_free;
+
+       alx->txq.tpd = (void *)alx->descmem.virt;
+       alx->txq.tpd_dma = alx->descmem.dma;
+
+       /* alignment requirement for next block */
+       BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
+
+       alx->rxq.rrd =
+               (void *)((u8 *)alx->descmem.virt +
+                        sizeof(struct alx_txd) * alx->tx_ringsz);
+       alx->rxq.rrd_dma = alx->descmem.dma +
+                          sizeof(struct alx_txd) * alx->tx_ringsz;
+
+       /* alignment requirement for next block */
+       BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
+
+       alx->rxq.rfd =
+               (void *)((u8 *)alx->descmem.virt +
+                        sizeof(struct alx_txd) * alx->tx_ringsz +
+                        sizeof(struct alx_rrd) * alx->rx_ringsz);
+       alx->rxq.rfd_dma = alx->descmem.dma +
+                          sizeof(struct alx_txd) * alx->tx_ringsz +
+                          sizeof(struct alx_rrd) * alx->rx_ringsz;
+
+       return 0;
+out_free:
+       kfree(alx->txq.bufs);
+       kfree(alx->rxq.bufs);
+       return -ENOMEM;
+}
+
+static int alx_alloc_rings(struct alx_priv *alx)
+{
+       int err;
+
+       err = alx_alloc_descriptors(alx);
+       if (err)
+               return err;
+
+       alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+       alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+
+       netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
+
+       alx_reinit_rings(alx);
+       return 0;
+}
+
+static void alx_free_rings(struct alx_priv *alx)
+{
+       netif_napi_del(&alx->napi);
+       alx_free_buffers(alx);
+
+       kfree(alx->txq.bufs);
+       kfree(alx->rxq.bufs);
+
+       dma_free_coherent(&alx->hw.pdev->dev,
+                         alx->descmem.size,
+                         alx->descmem.virt,
+                         alx->descmem.dma);
+}
+
+static void alx_config_vector_mapping(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
+       alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
+       alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
+}
+
+static void alx_irq_enable(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       /* level-1 interrupt switch */
+       alx_write_mem32(hw, ALX_ISR, 0);
+       alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+       alx_post_write(hw);
+}
+
+static void alx_irq_disable(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+       alx_write_mem32(hw, ALX_IMR, 0);
+       alx_post_write(hw);
+
+       synchronize_irq(alx->hw.pdev->irq);
+}
+
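+/* try MSI first, falling back to a shared legacy interrupt if MSI cannot be
+ * enabled or its request fails
+ */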
+static int alx_request_irq(struct alx_priv *alx)
+{
+       struct pci_dev *pdev = alx->hw.pdev;
+       struct alx_hw *hw = &alx->hw;
+       int err;
+       u32 msi_ctrl;
+
+       msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
+
+       if (!pci_enable_msi(alx->hw.pdev)) {
+               alx->msi = true;
+
+               alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
+                               msi_ctrl | ALX_MSI_MASK_SEL_LINE);
+               err = request_irq(pdev->irq, alx_intr_msi, 0,
+                                 alx->dev->name, alx);
+               if (!err)
+                       goto out;
+               /* fall back to legacy interrupt */
+               pci_disable_msi(alx->hw.pdev);
+       }
+
+       alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
+       err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
+                         alx->dev->name, alx);
+out:
+       if (!err)
+               alx_config_vector_mapping(alx);
+       return err;
+}
+
+static void alx_free_irq(struct alx_priv *alx)
+{
+       struct pci_dev *pdev = alx->hw.pdev;
+
+       free_irq(pdev->irq, alx);
+
+       if (alx->msi) {
+               pci_disable_msi(alx->hw.pdev);
+               alx->msi = false;
+       }
+}
+
+static int alx_identify_hw(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+       int rev = alx_hw_revision(hw);
+
+       if (rev > ALX_REV_C0)
+               return -EINVAL;
+
+       hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
+
+       return 0;
+}
+
+static int alx_init_sw(struct alx_priv *alx)
+{
+       struct pci_dev *pdev = alx->hw.pdev;
+       struct alx_hw *hw = &alx->hw;
+       int err;
+
+       err = alx_identify_hw(alx);
+       if (err) {
+               dev_err(&pdev->dev, "unrecognized chip, aborting\n");
+               return err;
+       }
+
+       alx->hw.lnk_patch =
+               pdev->device == ALX_DEV_ID_AR8161 &&
+               pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
+               pdev->subsystem_device == 0x0091 &&
+               pdev->revision == 0;
+
+       hw->smb_timer = 400;
+       hw->mtu = alx->dev->mtu;
+       alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
+       alx->tx_ringsz = 256;
+       alx->rx_ringsz = 512;
+       hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
+       hw->imt = 200;
+       alx->int_mask = ALX_ISR_MISC;
+       hw->dma_chnl = hw->max_dma_chnl;
+       hw->ith_tpd = alx->tx_ringsz / 3;
+       hw->link_speed = SPEED_UNKNOWN;
+       hw->adv_cfg = ADVERTISED_Autoneg |
+                     ADVERTISED_10baseT_Half |
+                     ADVERTISED_10baseT_Full |
+                     ADVERTISED_100baseT_Full |
+                     ADVERTISED_100baseT_Half |
+                     ADVERTISED_1000baseT_Full;
+       hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
+
+       hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
+                     ALX_MAC_CTRL_MHASH_ALG_HI5B |
+                     ALX_MAC_CTRL_BRD_EN |
+                     ALX_MAC_CTRL_PCRCE |
+                     ALX_MAC_CTRL_CRCE |
+                     ALX_MAC_CTRL_RXFC_EN |
+                     ALX_MAC_CTRL_TXFC_EN |
+                     7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
+
+       return err;
+}
+
+
+static netdev_features_t alx_fix_features(struct net_device *netdev,
+                                         netdev_features_t features)
+{
+       if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
+               features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+       return features;
+}
+
+static void alx_netif_stop(struct alx_priv *alx)
+{
+       alx->dev->trans_start = jiffies;
+       if (netif_carrier_ok(alx->dev)) {
+               netif_carrier_off(alx->dev);
+               netif_tx_disable(alx->dev);
+               napi_disable(&alx->napi);
+       }
+}
+
+static void alx_halt(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       alx_netif_stop(alx);
+       hw->link_speed = SPEED_UNKNOWN;
+
+       alx_reset_mac(hw);
+
+       /* disable l0s/l1 */
+       alx_enable_aspm(hw, false, false);
+       alx_irq_disable(alx);
+       alx_free_buffers(alx);
+}
+
+static void alx_configure(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+
+       alx_configure_basic(hw);
+       alx_disable_rss(hw);
+       __alx_set_rx_mode(alx->dev);
+
+       alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_activate(struct alx_priv *alx)
+{
+       /* hardware settings were lost, restore them */
+       alx_reinit_rings(alx);
+       alx_configure(alx);
+
+       /* clear old interrupts */
+       alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+       alx_irq_enable(alx);
+
+       alx_schedule_link_check(alx);
+}
+
+static void alx_reinit(struct alx_priv *alx)
+{
+       ASSERT_RTNL();
+
+       alx_halt(alx);
+       alx_activate(alx);
+}
+
+static int alx_change_mtu(struct net_device *netdev, int mtu)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+       if ((max_frame < ALX_MIN_FRAME_SIZE) ||
+           (max_frame > ALX_MAX_FRAME_SIZE))
+               return -EINVAL;
+
+       if (netdev->mtu == mtu)
+               return 0;
+
+       netdev->mtu = mtu;
+       alx->hw.mtu = mtu;
+       alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
+                          ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
+       netdev_update_features(netdev);
+       if (netif_running(netdev))
+               alx_reinit(alx);
+       return 0;
+}
+
+static void alx_netif_start(struct alx_priv *alx)
+{
+       netif_tx_wake_all_queues(alx->dev);
+       napi_enable(&alx->napi);
+       netif_carrier_on(alx->dev);
+}
+
+static int __alx_open(struct alx_priv *alx, bool resume)
+{
+       int err;
+
+       if (!resume)
+               netif_carrier_off(alx->dev);
+
+       err = alx_alloc_rings(alx);
+       if (err)
+               return err;
+
+       alx_configure(alx);
+
+       err = alx_request_irq(alx);
+       if (err)
+               goto out_free_rings;
+
+       /* clear old interrupts */
+       alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+       alx_irq_enable(alx);
+
+       if (!resume)
+               netif_tx_start_all_queues(alx->dev);
+
+       alx_schedule_link_check(alx);
+       return 0;
+
+out_free_rings:
+       alx_free_rings(alx);
+       return err;
+}
+
+static void __alx_stop(struct alx_priv *alx)
+{
+       alx_halt(alx);
+       alx_free_irq(alx);
+       alx_free_rings(alx);
+}
+
+static const char *alx_speed_desc(u16 speed)
+{
+       switch (speed) {
+       case SPEED_1000 + DUPLEX_FULL:
+               return "1 Gbps Full";
+       case SPEED_100 + DUPLEX_FULL:
+               return "100 Mbps Full";
+       case SPEED_100 + DUPLEX_HALF:
+               return "100 Mbps Half";
+       case SPEED_10 + DUPLEX_FULL:
+               return "10 Mbps Full";
+       case SPEED_10 + DUPLEX_HALF:
+               return "10 Mbps Half";
+       default:
+               return "Unknown speed";
+       }
+}
+
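+/* Re-check the PHY link state after a PHY interrupt: start the MAC and netif
+ * queues on link up, reset and reconfigure the MAC on link down, and schedule
+ * a full reset if anything fails.
+ */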
+static void alx_check_link(struct alx_priv *alx)
+{
+       struct alx_hw *hw = &alx->hw;
+       unsigned long flags;
+       int speed, old_speed;
+       int err;
+
+       /* clear PHY internal interrupt status, otherwise the main
+        * interrupt status will be asserted forever
+        */
+       alx_clear_phy_intr(hw);
+
+       err = alx_get_phy_link(hw, &speed);
+       if (err < 0)
+               goto reset;
+
+       spin_lock_irqsave(&alx->irq_lock, flags);
+       alx->int_mask |= ALX_ISR_PHY;
+       alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+       spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+       old_speed = hw->link_speed;
+
+       if (old_speed == speed)
+               return;
+       hw->link_speed = speed;
+
+       if (speed != SPEED_UNKNOWN) {
+               netif_info(alx, link, alx->dev,
+                          "NIC Up: %s\n", alx_speed_desc(speed));
+               alx_post_phy_link(hw);
+               alx_enable_aspm(hw, true, true);
+               alx_start_mac(hw);
+
+               if (old_speed == SPEED_UNKNOWN)
+                       alx_netif_start(alx);
+       } else {
+               /* link is now down */
+               alx_netif_stop(alx);
+               netif_info(alx, link, alx->dev, "Link Down\n");
+               err = alx_reset_mac(hw);
+               if (err)
+                       goto reset;
+               alx_irq_disable(alx);
+
+               /* MAC reset causes all HW settings to be lost, restore all */
+               err = alx_reinit_rings(alx);
+               if (err)
+                       goto reset;
+               alx_configure(alx);
+               alx_enable_aspm(hw, false, true);
+               alx_post_phy_link(hw);
+               alx_irq_enable(alx);
+       }
+
+       return;
+
+reset:
+       alx_schedule_reset(alx);
+}
+
+static int alx_open(struct net_device *netdev)
+{
+       return __alx_open(netdev_priv(netdev), false);
+}
+
+static int alx_stop(struct net_device *netdev)
+{
+       __alx_stop(netdev_priv(netdev));
+       return 0;
+}
+
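+/* common suspend/shutdown path: stop the interface, pick a power-saving link
+ * speed and program wake-on-LAN before the device is powered down
+ */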
+static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct net_device *netdev = alx->dev;
+       struct alx_hw *hw = &alx->hw;
+       int err, speed;
+
+       netif_device_detach(netdev);
+
+       if (netif_running(netdev))
+               __alx_stop(alx);
+
+#ifdef CONFIG_PM_SLEEP
+       err = pci_save_state(pdev);
+       if (err)
+               return err;
+#endif
+
+       err = alx_select_powersaving_speed(hw, &speed);
+       if (err)
+               return err;
+       err = alx_clear_phy_intr(hw);
+       if (err)
+               return err;
+       err = alx_pre_suspend(hw, speed);
+       if (err)
+               return err;
+       err = alx_config_wol(hw);
+       if (err)
+               return err;
+
+       *wol_en = false;
+       if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
+               netif_info(alx, wol, netdev,
+                          "wol: ctrl=%X, speed=%X\n",
+                          hw->sleep_ctrl, speed);
+               device_set_wakeup_enable(&pdev->dev, true);
+               *wol_en = true;
+       }
+
+       pci_disable_device(pdev);
+
+       return 0;
+}
+
+static void alx_shutdown(struct pci_dev *pdev)
+{
+       int err;
+       bool wol_en;
+
+       err = __alx_shutdown(pdev, &wol_en);
+       if (!err) {
+               pci_wake_from_d3(pdev, wol_en);
+               pci_set_power_state(pdev, PCI_D3hot);
+       } else {
+               dev_err(&pdev->dev, "shutdown fail %d\n", err);
+       }
+}
+
+static void alx_link_check(struct work_struct *work)
+{
+       struct alx_priv *alx;
+
+       alx = container_of(work, struct alx_priv, link_check_wk);
+
+       rtnl_lock();
+       alx_check_link(alx);
+       rtnl_unlock();
+}
+
+static void alx_reset(struct work_struct *work)
+{
+       struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
+
+       rtnl_lock();
+       alx_reinit(alx);
+       rtnl_unlock();
+}
+
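+/* set up partial checksum offload in the first TPD; the offsets are given in
+ * 16-bit words, so an odd checksum start offset cannot be offloaded
+ */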
+static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
+{
+       u8 cso, css;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       cso = skb_checksum_start_offset(skb);
+       if (cso & 1)
+               return -EINVAL;
+
+       css = cso + skb->csum_offset;
+       first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
+       first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
+       first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
+
+       return 0;
+}
+
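+/* map the skb head and all fragments into TX descriptors, unwinding any
+ * already-mapped entries if a DMA mapping fails
+ */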
+static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
+{
+       struct alx_tx_queue *txq = &alx->txq;
+       struct alx_txd *tpd, *first_tpd;
+       dma_addr_t dma;
+       int maplen, f, first_idx = txq->write_idx;
+
+       first_tpd = &txq->tpd[txq->write_idx];
+       tpd = first_tpd;
+
+       maplen = skb_headlen(skb);
+       dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
+                            DMA_TO_DEVICE);
+       if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+               goto err_dma;
+
+       dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+       dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+       tpd->adrl.addr = cpu_to_le64(dma);
+       tpd->len = cpu_to_le16(maplen);
+
+       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+               struct skb_frag_struct *frag;
+
+               frag = &skb_shinfo(skb)->frags[f];
+
+               if (++txq->write_idx == alx->tx_ringsz)
+                       txq->write_idx = 0;
+               tpd = &txq->tpd[txq->write_idx];
+
+               tpd->word1 = first_tpd->word1;
+
+               maplen = skb_frag_size(frag);
+               dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
+                                      maplen, DMA_TO_DEVICE);
+               if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+                       goto err_dma;
+               dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+               dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+               tpd->adrl.addr = cpu_to_le64(dma);
+               tpd->len = cpu_to_le16(maplen);
+       }
+
+       /* last TPD, set EOP flag and store skb */
+       tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
+       txq->bufs[txq->write_idx].skb = skb;
+
+       if (++txq->write_idx == alx->tx_ringsz)
+               txq->write_idx = 0;
+
+       return 0;
+
+err_dma:
+       f = first_idx;
+       while (f != txq->write_idx) {
+               alx_free_txbuf(alx, f);
+               if (++f == alx->tx_ringsz)
+                       f = 0;
+       }
+       return -ENOMEM;
+}
+
+static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
+                                 struct net_device *netdev)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_tx_queue *txq = &alx->txq;
+       struct alx_txd *first;
+       int tpdreq = skb_shinfo(skb)->nr_frags + 1;
+
+       if (alx_tpd_avail(alx) < tpdreq) {
+               netif_stop_queue(alx->dev);
+               goto drop;
+       }
+
+       first = &txq->tpd[txq->write_idx];
+       memset(first, 0, sizeof(*first));
+
+       if (alx_tx_csum(skb, first))
+               goto drop;
+
+       if (alx_map_tx_skb(alx, skb) < 0)
+               goto drop;
+
+       netdev_sent_queue(alx->dev, skb->len);
+
+       /* flush updates before updating hardware */
+       wmb();
+       alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
+
+       if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
+               netif_stop_queue(alx->dev);
+
+       return NETDEV_TX_OK;
+
+drop:
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+static void alx_tx_timeout(struct net_device *dev)
+{
+       struct alx_priv *alx = netdev_priv(dev);
+
+       alx_schedule_reset(alx);
+}
+
+static int alx_mdio_read(struct net_device *netdev,
+                        int prtad, int devad, u16 addr)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+       u16 val;
+       int err;
+
+       if (prtad != hw->mdio.prtad)
+               return -EINVAL;
+
+       if (devad == MDIO_DEVAD_NONE)
+               err = alx_read_phy_reg(hw, addr, &val);
+       else
+               err = alx_read_phy_ext(hw, devad, addr, &val);
+
+       if (err)
+               return err;
+       return val;
+}
+
+static int alx_mdio_write(struct net_device *netdev,
+                         int prtad, int devad, u16 addr, u16 val)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+       struct alx_hw *hw = &alx->hw;
+
+       if (prtad != hw->mdio.prtad)
+               return -EINVAL;
+
+       if (devad == MDIO_DEVAD_NONE)
+               return alx_write_phy_reg(hw, addr, val);
+
+       return alx_write_phy_ext(hw, devad, addr, val);
+}
+
+static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       if (!netif_running(netdev))
+               return -EAGAIN;
+
+       return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void alx_poll_controller(struct net_device *netdev)
+{
+       struct alx_priv *alx = netdev_priv(netdev);
+
+       if (alx->msi)
+               alx_intr_msi(0, alx);
+       else
+               alx_intr_legacy(0, alx);
+}
+#endif
+
+static const struct net_device_ops alx_netdev_ops = {
+       .ndo_open               = alx_open,
+       .ndo_stop               = alx_stop,
+       .ndo_start_xmit         = alx_start_xmit,
+       .ndo_set_rx_mode        = alx_set_rx_mode,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = alx_set_mac_address,
+       .ndo_change_mtu         = alx_change_mtu,
+       .ndo_do_ioctl           = alx_ioctl,
+       .ndo_tx_timeout         = alx_tx_timeout,
+       .ndo_fix_features       = alx_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = alx_poll_controller,
+#endif
+};
+
+static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct net_device *netdev;
+       struct alx_priv *alx;
+       struct alx_hw *hw;
+       bool phy_configured;
+       int bars, pm_cap, err;
+
+       err = pci_enable_device_mem(pdev);
+       if (err)
+               return err;
+
+       /* The alx chip can DMA to 64-bit addresses, but it uses a single
+        * shared register for the high 32 bits, so only a single, aligned,
+        * 4 GB physical address range can be used for descriptors.
+        */
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+               dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
+       } else {
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               if (err) {
+                       err = dma_set_coherent_mask(&pdev->dev,
+                                                   DMA_BIT_MASK(32));
+                       if (err) {
+                               dev_err(&pdev->dev,
+                                       "No usable DMA config, aborting\n");
+                               goto out_pci_disable;
+                       }
+               }
+       }
+
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "pci_request_selected_regions failed(bars:%d)\n", bars);
+               goto out_pci_disable;
+       }
+
+       pci_enable_pcie_error_reporting(pdev);
+       pci_set_master(pdev);
+
+       pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+       if (pm_cap == 0) {
+               dev_err(&pdev->dev,
+                       "Can't find power management capability, aborting\n");
+               err = -EIO;
+               goto out_pci_release;
+       }
+
+       err = pci_set_power_state(pdev, PCI_D0);
+       if (err)
+               goto out_pci_release;
+
+       netdev = alloc_etherdev(sizeof(*alx));
+       if (!netdev) {
+               err = -ENOMEM;
+               goto out_pci_release;
+       }
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+       alx = netdev_priv(netdev);
+       alx->dev = netdev;
+       alx->hw.pdev = pdev;
+       alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
+                         NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
+       hw = &alx->hw;
+       pci_set_drvdata(pdev, alx);
+
+       hw->hw_addr = pci_ioremap_bar(pdev, 0);
+       if (!hw->hw_addr) {
+               dev_err(&pdev->dev, "cannot map device registers\n");
+               err = -EIO;
+               goto out_free_netdev;
+       }
+
+       netdev->netdev_ops = &alx_netdev_ops;
+       SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+       netdev->irq = pdev->irq;
+       netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
+
+       if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
+               pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+
+       err = alx_init_sw(alx);
+       if (err) {
+               dev_err(&pdev->dev, "net device private data init failed\n");
+               goto out_unmap;
+       }
+
+       alx_reset_pcie(hw);
+
+       phy_configured = alx_phy_configured(hw);
+
+       if (!phy_configured)
+               alx_reset_phy(hw);
+
+       err = alx_reset_mac(hw);
+       if (err) {
+               dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
+               goto out_unmap;
+       }
+
+       /* setup link to put it in a known good starting state */
+       if (!phy_configured) {
+               err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "failed to configure PHY speed/duplex (err=%d)\n",
+                               err);
+                       goto out_unmap;
+               }
+       }
+
+       netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+
+       if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
+               dev_warn(&pdev->dev,
+                        "Invalid permanent address programmed, using random one\n");
+               eth_hw_addr_random(netdev);
+               memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
+       }
+
+       memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
+       memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+       memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
+
+       hw->mdio.prtad = 0;
+       hw->mdio.mmds = 0;
+       hw->mdio.dev = netdev;
+       hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
+                               MDIO_SUPPORTS_C22 |
+                               MDIO_EMULATE_C22;
+       hw->mdio.mdio_read = alx_mdio_read;
+       hw->mdio.mdio_write = alx_mdio_write;
+
+       if (!alx_get_phy_info(hw)) {
+               dev_err(&pdev->dev, "failed to identify PHY\n");
+               err = -EIO;
+               goto out_unmap;
+       }
+
+       INIT_WORK(&alx->link_check_wk, alx_link_check);
+       INIT_WORK(&alx->reset_wk, alx_reset);
+       spin_lock_init(&alx->hw.mdio_lock);
+       spin_lock_init(&alx->irq_lock);
+
+       netif_carrier_off(netdev);
+
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(&pdev->dev, "register netdevice failed\n");
+               goto out_unmap;
+       }
+
+       device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
+
+       netdev_info(netdev,
+                   "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
+                   netdev->dev_addr);
+
+       return 0;
+
+out_unmap:
+       iounmap(hw->hw_addr);
+out_free_netdev:
+       free_netdev(netdev);
+out_pci_release:
+       pci_release_selected_regions(pdev, bars);
+out_pci_disable:
+       pci_disable_device(pdev);
+       return err;
+}
+
+static void alx_remove(struct pci_dev *pdev)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct alx_hw *hw = &alx->hw;
+
+       cancel_work_sync(&alx->link_check_wk);
+       cancel_work_sync(&alx->reset_wk);
+
+       /* restore permanent mac address */
+       alx_set_macaddr(hw, hw->perm_addr);
+
+       unregister_netdev(alx->dev);
+       iounmap(hw->hw_addr);
+       pci_release_selected_regions(pdev,
+                                    pci_select_bars(pdev, IORESOURCE_MEM));
+
+       pci_disable_pcie_error_reporting(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+
+       free_netdev(alx->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int alx_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int err;
+       bool wol_en;
+
+       err = __alx_shutdown(pdev, &wol_en);
+       if (err) {
+               dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
+               return err;
+       }
+
+       if (wol_en) {
+               pci_prepare_to_sleep(pdev);
+       } else {
+               pci_wake_from_d3(pdev, false);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
+
+       return 0;
+}
+
+static int alx_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct net_device *netdev = alx->dev;
+       struct alx_hw *hw = &alx->hw;
+       int err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       pci_save_state(pdev);
+
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
+
+       hw->link_speed = SPEED_UNKNOWN;
+       alx->int_mask = ALX_ISR_MISC;
+
+       alx_reset_pcie(hw);
+       alx_reset_phy(hw);
+
+       err = alx_reset_mac(hw);
+       if (err) {
+               netif_err(alx, hw, alx->dev,
+                         "resume:reset_mac fail %d\n", err);
+               return -EIO;
+       }
+
+       err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+       if (err) {
+               netif_err(alx, hw, alx->dev,
+                         "resume:setup_speed_duplex fail %d\n", err);
+               return -EIO;
+       }
+
+       if (netif_running(netdev)) {
+               err = __alx_open(alx, true);
+               if (err)
+                       return err;
+       }
+
+       netif_device_attach(netdev);
+
+       return err;
+}
+#endif
+
+static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
+                                              pci_channel_state_t state)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct net_device *netdev = alx->dev;
+       pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
+
+       dev_info(&pdev->dev, "pci error detected\n");
+
+       rtnl_lock();
+
+       if (netif_running(netdev)) {
+               netif_device_detach(netdev);
+               alx_halt(alx);
+       }
+
+       if (state == pci_channel_io_perm_failure)
+               rc = PCI_ERS_RESULT_DISCONNECT;
+       else
+               pci_disable_device(pdev);
+
+       rtnl_unlock();
+
+       return rc;
+}
+
+static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct alx_hw *hw = &alx->hw;
+       pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+       dev_info(&pdev->dev, "pci error slot reset\n");
+
+       rtnl_lock();
+
+       if (pci_enable_device(pdev)) {
+               dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
+               goto out;
+       }
+
+       pci_set_master(pdev);
+       pci_enable_wake(pdev, PCI_D3hot, 0);
+       pci_enable_wake(pdev, PCI_D3cold, 0);
+
+       alx_reset_pcie(hw);
+       if (!alx_reset_mac(hw))
+               rc = PCI_ERS_RESULT_RECOVERED;
+out:
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+
+       rtnl_unlock();
+
+       return rc;
+}
+
+static void alx_pci_error_resume(struct pci_dev *pdev)
+{
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct net_device *netdev = alx->dev;
+
+       dev_info(&pdev->dev, "pci error resume\n");
+
+       rtnl_lock();
+
+       if (netif_running(netdev)) {
+               alx_activate(alx);
+               netif_device_attach(netdev);
+       }
+
+       rtnl_unlock();
+}
+
+static const struct pci_error_handlers alx_err_handlers = {
+       .error_detected = alx_pci_error_detected,
+       .slot_reset     = alx_pci_error_slot_reset,
+       .resume         = alx_pci_error_resume,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
+#define ALX_PM_OPS      (&alx_pm_ops)
+#else
+#define ALX_PM_OPS      NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
+         .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
+         .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
+         .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
+       { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
+       {}
+};
+
+static struct pci_driver alx_driver = {
+       .name        = alx_drv_name,
+       .id_table    = alx_pci_tbl,
+       .probe       = alx_probe,
+       .remove      = alx_remove,
+       .shutdown    = alx_shutdown,
+       .err_handler = &alx_err_handlers,
+       .driver.pm   = ALX_PM_OPS,
+};
+
+module_pci_driver(alx_driver);
+MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
+MODULE_DESCRIPTION(
+       "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644 (file)
index 0000000..e4358c9
--- /dev/null
@@ -0,0 +1,810 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_REG_H
+#define ALX_REG_H
+
+#define ALX_DEV_ID_AR8161                              0x1091
+#define ALX_DEV_ID_E2200                               0xe091
+#define ALX_DEV_ID_AR8162                              0x1090
+#define ALX_DEV_ID_AR8171                              0x10A1
+#define ALX_DEV_ID_AR8172                              0x10A0
+
+/* rev definition,
+ * bit(0): with xD support
+ * bit(1): with Card Reader function
+ * bit(7:2): real revision
+ */
+#define ALX_PCI_REVID_SHIFT                            3
+#define ALX_REV_A0                                     0
+#define ALX_REV_A1                                     1
+#define ALX_REV_B0                                     2
+#define ALX_REV_C0                                     3
+
+#define ALX_DEV_CTRL                                   0x0060
+#define ALX_DEV_CTRL_MAXRRS_MIN                                2
+
+#define ALX_MSIX_MASK                                  0x0090
+
+#define ALX_UE_SVRT                                    0x010C
+#define ALX_UE_SVRT_FCPROTERR                          BIT(13)
+#define ALX_UE_SVRT_DLPROTERR                          BIT(4)
+
+/* eeprom & flash load register */
+#define ALX_EFLD                                       0x0204
+#define ALX_EFLD_F_EXIST                               BIT(10)
+#define ALX_EFLD_E_EXIST                               BIT(9)
+#define ALX_EFLD_STAT                                  BIT(5)
+#define ALX_EFLD_START                                 BIT(0)
+
+/* eFuse load register */
+#define ALX_SLD                                                0x0218
+#define ALX_SLD_STAT                                   BIT(12)
+#define ALX_SLD_START                                  BIT(11)
+#define ALX_SLD_MAX_TO                                 100
+
+#define ALX_PDLL_TRNS1                                 0x1104
+#define ALX_PDLL_TRNS1_D3PLLOFF_EN                     BIT(11)
+
+#define ALX_PMCTRL                                     0x12F8
+#define ALX_PMCTRL_HOTRST_WTEN                         BIT(31)
+/* bit30: L0s/L1 controlled by MAC based on throughput (setting in 15A0) */
+#define ALX_PMCTRL_ASPM_FCEN                           BIT(30)
+#define ALX_PMCTRL_SADLY_EN                            BIT(29)
+#define ALX_PMCTRL_LCKDET_TIMER_MASK                   0xF
+#define ALX_PMCTRL_LCKDET_TIMER_SHIFT                  24
+#define ALX_PMCTRL_LCKDET_TIMER_DEF                    0xC
+/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */
+#define ALX_PMCTRL_L1REQ_TO_MASK                       0xF
+#define ALX_PMCTRL_L1REQ_TO_SHIFT                      20
+#define ALX_PMCTRL_L1REG_TO_DEF                                0xF
+#define ALX_PMCTRL_TXL1_AFTER_L0S                      BIT(19)
+#define ALX_PMCTRL_L1_TIMER_MASK                       0x7
+#define ALX_PMCTRL_L1_TIMER_SHIFT                      16
+#define ALX_PMCTRL_L1_TIMER_16US                       4
+#define ALX_PMCTRL_RCVR_WT_1US                         BIT(15)
+/* bit13: enable pcie clk switch in L1 state */
+#define ALX_PMCTRL_L1_CLKSW_EN                         BIT(13)
+#define ALX_PMCTRL_L0S_EN                              BIT(12)
+#define ALX_PMCTRL_RXL1_AFTER_L0S                      BIT(11)
+#define ALX_PMCTRL_L1_BUFSRX_EN                                BIT(7)
+/* bit6: power down serdes RX */
+#define ALX_PMCTRL_L1_SRDSRX_PWD                       BIT(6)
+#define ALX_PMCTRL_L1_SRDSPLL_EN                       BIT(5)
+#define ALX_PMCTRL_L1_SRDS_EN                          BIT(4)
+#define ALX_PMCTRL_L1_EN                               BIT(3)
+
+/*******************************************************/
+/* following registers are mapped only to memory space */
+/*******************************************************/
+
+#define ALX_MASTER                                     0x1400
+/* bit12: 1: always select pclk from serdes, never switch to the 25M clock */
+#define ALX_MASTER_PCLKSEL_SRDS                                BIT(12)
+/* bit11: irq moderation for rx */
+#define ALX_MASTER_IRQMOD2_EN                          BIT(11)
+/* bit10: irq moderation for tx/rx */
+#define ALX_MASTER_IRQMOD1_EN                          BIT(10)
+#define ALX_MASTER_SYSALVTIMER_EN                      BIT(7)
+#define ALX_MASTER_OOB_DIS                             BIT(6)
+/* bit5: wakeup without pcie clk */
+#define ALX_MASTER_WAKEN_25M                           BIT(5)
+/* bit0: MAC & DMA reset */
+#define ALX_MASTER_DMA_MAC_RST                         BIT(0)
+#define ALX_DMA_MAC_RST_TO                             50
+
+#define ALX_IRQ_MODU_TIMER                             0x1408
+#define ALX_IRQ_MODU_TIMER1_MASK                       0xFFFF
+#define ALX_IRQ_MODU_TIMER1_SHIFT                      0
+
+#define ALX_PHY_CTRL                                   0x140C
+#define ALX_PHY_CTRL_100AB_EN                          BIT(17)
+/* bit14: affects MAC & PHY, puts them into low power state */
+#define ALX_PHY_CTRL_POWER_DOWN                                BIT(14)
+/* bit13: 1: PLL always on, 0: PLL may be switched off in low power */
+#define ALX_PHY_CTRL_PLL_ON                            BIT(13)
+#define ALX_PHY_CTRL_RST_ANALOG                                BIT(12)
+#define ALX_PHY_CTRL_HIB_PULSE                         BIT(11)
+#define ALX_PHY_CTRL_HIB_EN                            BIT(10)
+#define ALX_PHY_CTRL_IDDQ                              BIT(7)
+#define ALX_PHY_CTRL_GATE_25M                          BIT(5)
+#define ALX_PHY_CTRL_LED_MODE                          BIT(2)
+/* bit0: out of dsp RST state */
+#define ALX_PHY_CTRL_DSPRST_OUT                                BIT(0)
+#define ALX_PHY_CTRL_DSPRST_TO                         80
+#define ALX_PHY_CTRL_CLS       (ALX_PHY_CTRL_LED_MODE | \
+                                ALX_PHY_CTRL_100AB_EN | \
+                                ALX_PHY_CTRL_PLL_ON)
+
+#define ALX_MAC_STS                                    0x1410
+#define ALX_MAC_STS_TXQ_BUSY                           BIT(3)
+#define ALX_MAC_STS_RXQ_BUSY                           BIT(2)
+#define ALX_MAC_STS_TXMAC_BUSY                         BIT(1)
+#define ALX_MAC_STS_RXMAC_BUSY                         BIT(0)
+#define ALX_MAC_STS_IDLE       (ALX_MAC_STS_TXQ_BUSY | \
+                                ALX_MAC_STS_RXQ_BUSY | \
+                                ALX_MAC_STS_TXMAC_BUSY | \
+                                ALX_MAC_STS_RXMAC_BUSY)
+
+#define ALX_MDIO                                       0x1414
+#define ALX_MDIO_MODE_EXT                              BIT(30)
+#define ALX_MDIO_BUSY                                  BIT(27)
+#define ALX_MDIO_CLK_SEL_MASK                          0x7
+#define ALX_MDIO_CLK_SEL_SHIFT                         24
+#define ALX_MDIO_CLK_SEL_25MD4                         0
+#define ALX_MDIO_CLK_SEL_25MD128                       7
+#define ALX_MDIO_START                                 BIT(23)
+#define ALX_MDIO_SPRES_PRMBL                           BIT(22)
+/* bit21: 1:read,0:write */
+#define ALX_MDIO_OP_READ                               BIT(21)
+#define ALX_MDIO_REG_MASK                              0x1F
+#define ALX_MDIO_REG_SHIFT                             16
+#define ALX_MDIO_DATA_MASK                             0xFFFF
+#define ALX_MDIO_DATA_SHIFT                            0
+#define ALX_MDIO_MAX_AC_TO                             120
+
+#define ALX_MDIO_EXTN                                  0x1448
+#define ALX_MDIO_EXTN_DEVAD_MASK                       0x1F
+#define ALX_MDIO_EXTN_DEVAD_SHIFT                      16
+#define ALX_MDIO_EXTN_REG_MASK                         0xFFFF
+#define ALX_MDIO_EXTN_REG_SHIFT                                0
+
+#define ALX_SERDES                                     0x1424
+#define ALX_SERDES_PHYCLK_SLWDWN                       BIT(18)
+#define ALX_SERDES_MACCLK_SLWDWN                       BIT(17)
+
+#define ALX_LPI_CTRL                                   0x1440
+#define ALX_LPI_CTRL_EN                                        BIT(0)
+
+/* for B0+, bit[13..] for C0+ */
+#define ALX_HRTBT_EXT_CTRL                             0x1AD0
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK            0x3F
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT           24
+#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN         BIT(23)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED           BIT(22)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED           BIT(21)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN         BIT(20)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN             BIT(19)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023              BIT(18)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6              BIT(17)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN         BIT(16)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN             BIT(15)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023              BIT(14)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6              BIT(13)
+#define ALX_HRTBT_EXT_CTRL_NS_EN                       BIT(12)
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK               0xFF
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT              4
+#define ALX_HRTBT_EXT_CTRL_IS_8023                     BIT(3)
+#define ALX_HRTBT_EXT_CTRL_IS_IPV6                     BIT(2)
+#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN                   BIT(1)
+#define ALX_HRTBT_EXT_CTRL_ARP_EN                      BIT(0)
+
+#define ALX_HRTBT_REM_IPV4_ADDR                                0x1AD4
+#define ALX_HRTBT_HOST_IPV4_ADDR                       0x1478
+#define ALX_HRTBT_REM_IPV6_ADDR3                       0x1AD8
+#define ALX_HRTBT_REM_IPV6_ADDR2                       0x1ADC
+#define ALX_HRTBT_REM_IPV6_ADDR1                       0x1AE0
+#define ALX_HRTBT_REM_IPV6_ADDR0                       0x1AE4
+
+/* 1B8C ~ 1B94 for C0+ */
+#define ALX_SWOI_ACER_CTRL                             0x1B8C
+#define ALX_SWOI_ORIG_ACK_NAK_EN                       BIT(20)
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK             0xFF
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT            12
+#define ALX_SWOI_ORIG_ACK_ADDR_MASK                    0xFFF
+#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT                   0
+
+#define ALX_SWOI_IOAC_CTRL_2                           0x1B90
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK      0xFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT     24
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK       0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT      12
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK      0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT     0
+
+#define ALX_SWOI_IOAC_CTRL_3                           0x1B94
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK      0xFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT     24
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK       0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT      12
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK      0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT     0
+
+/* for B0 */
+#define ALX_IDLE_DECISN_TIMER                          0x1474
+/* 1ms */
+#define ALX_IDLE_DECISN_TIMER_DEF                      0x400
+
+#define ALX_MAC_CTRL                                   0x1480
+#define ALX_MAC_CTRL_FAST_PAUSE                                BIT(31)
+#define ALX_MAC_CTRL_WOLSPED_SWEN                      BIT(30)
+/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/
+#define ALX_MAC_CTRL_MHASH_ALG_HI5B                    BIT(29)
+#define ALX_MAC_CTRL_BRD_EN                            BIT(26)
+#define ALX_MAC_CTRL_MULTIALL_EN                       BIT(25)
+#define ALX_MAC_CTRL_SPEED_MASK                                0x3
+#define ALX_MAC_CTRL_SPEED_SHIFT                       20
+#define ALX_MAC_CTRL_SPEED_10_100                      1
+#define ALX_MAC_CTRL_SPEED_1000                                2
+#define ALX_MAC_CTRL_PROMISC_EN                                BIT(15)
+#define ALX_MAC_CTRL_VLANSTRIP                         BIT(14)
+#define ALX_MAC_CTRL_PRMBLEN_MASK                      0xF
+#define ALX_MAC_CTRL_PRMBLEN_SHIFT                     10
+#define ALX_MAC_CTRL_PCRCE                             BIT(7)
+#define ALX_MAC_CTRL_CRCE                              BIT(6)
+#define ALX_MAC_CTRL_FULLD                             BIT(5)
+#define ALX_MAC_CTRL_RXFC_EN                           BIT(3)
+#define ALX_MAC_CTRL_TXFC_EN                           BIT(2)
+#define ALX_MAC_CTRL_RX_EN                             BIT(1)
+#define ALX_MAC_CTRL_TX_EN                             BIT(0)
+
+#define ALX_STAD0                                      0x1488
+#define ALX_STAD1                                      0x148C
+
+#define ALX_HASH_TBL0                                  0x1490
+#define ALX_HASH_TBL1                                  0x1494
+
+#define ALX_MTU                                                0x149C
+#define ALX_MTU_JUMBO_TH                               1514
+#define ALX_MTU_STD_ALGN                               1536
+
+#define ALX_SRAM5                                      0x1524
+#define ALX_SRAM_RXF_LEN_MASK                          0xFFF
+#define ALX_SRAM_RXF_LEN_SHIFT                         0
+#define ALX_SRAM_RXF_LEN_8K                            (8*1024)
+
+#define ALX_SRAM9                                      0x1534
+#define ALX_SRAM_LOAD_PTR                              BIT(0)
+
+#define ALX_RX_BASE_ADDR_HI                            0x1540
+
+#define ALX_TX_BASE_ADDR_HI                            0x1544
+
+#define ALX_RFD_ADDR_LO                                        0x1550
+#define ALX_RFD_RING_SZ                                        0x1560
+#define ALX_RFD_BUF_SZ                                 0x1564
+
+#define ALX_RRD_ADDR_LO                                        0x1568
+#define ALX_RRD_RING_SZ                                        0x1578
+
+/* pri3: highest, pri0: lowest */
+#define ALX_TPD_PRI3_ADDR_LO                           0x14E4
+#define ALX_TPD_PRI2_ADDR_LO                           0x14E0
+#define ALX_TPD_PRI1_ADDR_LO                           0x157C
+#define ALX_TPD_PRI0_ADDR_LO                           0x1580
+
+/* producer index is 16bit */
+#define ALX_TPD_PRI3_PIDX                              0x1618
+#define ALX_TPD_PRI2_PIDX                              0x161A
+#define ALX_TPD_PRI1_PIDX                              0x15F0
+#define ALX_TPD_PRI0_PIDX                              0x15F2
+
+/* consumer index is 16bit */
+#define ALX_TPD_PRI3_CIDX                              0x161C
+#define ALX_TPD_PRI2_CIDX                              0x161E
+#define ALX_TPD_PRI1_CIDX                              0x15F4
+#define ALX_TPD_PRI0_CIDX                              0x15F6
+
+#define ALX_TPD_RING_SZ                                        0x1584
+
+#define ALX_TXQ0                                       0x1590
+#define ALX_TXQ0_TXF_BURST_PREF_MASK                   0xFFFF
+#define ALX_TXQ0_TXF_BURST_PREF_SHIFT                  16
+#define ALX_TXQ_TXF_BURST_PREF_DEF                     0x200
+#define ALX_TXQ0_LSO_8023_EN                           BIT(7)
+#define ALX_TXQ0_MODE_ENHANCE                          BIT(6)
+#define ALX_TXQ0_EN                                    BIT(5)
+#define ALX_TXQ0_SUPT_IPOPT                            BIT(4)
+#define ALX_TXQ0_TPD_BURSTPREF_MASK                    0xF
+#define ALX_TXQ0_TPD_BURSTPREF_SHIFT                   0
+#define ALX_TXQ_TPD_BURSTPREF_DEF                      5
+
+#define ALX_TXQ1                                       0x1594
+/* bit11:  drop large packet, len > (rfd buf) */
+#define ALX_TXQ1_ERRLGPKT_DROP_EN                      BIT(11)
+#define ALX_TXQ1_JUMBO_TSO_TH                          (7*1024)
+
+#define ALX_RXQ0                                       0x15A0
+#define ALX_RXQ0_EN                                    BIT(31)
+#define ALX_RXQ0_RSS_HASH_EN                           BIT(29)
+#define ALX_RXQ0_RSS_MODE_MASK                         0x3
+#define ALX_RXQ0_RSS_MODE_SHIFT                                26
+#define ALX_RXQ0_RSS_MODE_DIS                          0
+#define ALX_RXQ0_RSS_MODE_MQMI                         3
+#define ALX_RXQ0_NUM_RFD_PREF_MASK                     0x3F
+#define ALX_RXQ0_NUM_RFD_PREF_SHIFT                    20
+#define ALX_RXQ0_NUM_RFD_PREF_DEF                      8
+#define ALX_RXQ0_IDT_TBL_SIZE_MASK                     0x1FF
+#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT                    8
+#define ALX_RXQ0_IDT_TBL_SIZE_DEF                      0x100
+#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL                   128
+#define ALX_RXQ0_IPV6_PARSE_EN                         BIT(7)
+#define ALX_RXQ0_RSS_HSTYP_MASK                                0xF
+#define ALX_RXQ0_RSS_HSTYP_SHIFT                       2
+#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN                 BIT(5)
+#define ALX_RXQ0_RSS_HSTYP_IPV6_EN                     BIT(4)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN                 BIT(3)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_EN                     BIT(2)
+#define ALX_RXQ0_RSS_HSTYP_ALL         (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
+                                        ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
+                                        ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
+                                        ALX_RXQ0_RSS_HSTYP_IPV4_EN)
+#define ALX_RXQ0_ASPM_THRESH_MASK                      0x3
+#define ALX_RXQ0_ASPM_THRESH_SHIFT                     0
+#define ALX_RXQ0_ASPM_THRESH_100M                      3
+
+#define ALX_RXQ2                                       0x15A8
+#define ALX_RXQ2_RXF_XOFF_THRESH_MASK                  0xFFF
+#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT                 16
+#define ALX_RXQ2_RXF_XON_THRESH_MASK                   0xFFF
+#define ALX_RXQ2_RXF_XON_THRESH_SHIFT                  0
+/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
+ *        rx-packet(1522) + delay-of-link(64)
+ *      = 3212.
+ */
+#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD                    3212
+
+#define ALX_DMA                                                0x15C0
+#define ALX_DMA_RCHNL_SEL_MASK                         0x3
+#define ALX_DMA_RCHNL_SEL_SHIFT                                26
+#define ALX_DMA_WDLY_CNT_MASK                          0xF
+#define ALX_DMA_WDLY_CNT_SHIFT                         16
+#define ALX_DMA_WDLY_CNT_DEF                           4
+#define ALX_DMA_RDLY_CNT_MASK                          0x1F
+#define ALX_DMA_RDLY_CNT_SHIFT                         11
+#define ALX_DMA_RDLY_CNT_DEF                           15
+/* bit10: 0:tpd with pri, 1: data */
+#define ALX_DMA_RREQ_PRI_DATA                          BIT(10)
+#define ALX_DMA_RREQ_BLEN_MASK                         0x7
+#define ALX_DMA_RREQ_BLEN_SHIFT                                4
+#define ALX_DMA_RORDER_MODE_MASK                       0x7
+#define ALX_DMA_RORDER_MODE_SHIFT                      0
+#define ALX_DMA_RORDER_MODE_OUT                                4
+
+#define ALX_WOL0                                       0x14A0
+#define ALX_WOL0_PME_LINK                              BIT(5)
+#define ALX_WOL0_LINK_EN                               BIT(4)
+#define ALX_WOL0_PME_MAGIC_EN                          BIT(3)
+#define ALX_WOL0_MAGIC_EN                              BIT(2)
+
+#define ALX_RFD_PIDX                                   0x15E0
+
+#define ALX_RFD_CIDX                                   0x15F8
+
+/* MIB */
+#define ALX_MIB_BASE                                   0x1700
+#define ALX_MIB_RX_OK                                  (ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_ERRADDR                             (ALX_MIB_BASE + 92)
+#define ALX_MIB_TX_OK                                  (ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_MCCNT                               (ALX_MIB_BASE + 192)
+
+#define ALX_RX_STATS_BIN                               ALX_MIB_RX_OK
+#define ALX_RX_STATS_END                               ALX_MIB_RX_ERRADDR
+#define ALX_TX_STATS_BIN                               ALX_MIB_TX_OK
+#define ALX_TX_STATS_END                               ALX_MIB_TX_MCCNT
+
+#define ALX_ISR                                                0x1600
+#define ALX_ISR_DIS                                    BIT(31)
+#define ALX_ISR_RX_Q7                                  BIT(30)
+#define ALX_ISR_RX_Q6                                  BIT(29)
+#define ALX_ISR_RX_Q5                                  BIT(28)
+#define ALX_ISR_RX_Q4                                  BIT(27)
+#define ALX_ISR_PCIE_LNKDOWN                           BIT(26)
+#define ALX_ISR_RX_Q3                                  BIT(19)
+#define ALX_ISR_RX_Q2                                  BIT(18)
+#define ALX_ISR_RX_Q1                                  BIT(17)
+#define ALX_ISR_RX_Q0                                  BIT(16)
+#define ALX_ISR_TX_Q0                                  BIT(15)
+#define ALX_ISR_PHY                                    BIT(12)
+#define ALX_ISR_DMAW                                   BIT(10)
+#define ALX_ISR_DMAR                                   BIT(9)
+#define ALX_ISR_TXF_UR                                 BIT(8)
+#define ALX_ISR_TX_Q3                                  BIT(7)
+#define ALX_ISR_TX_Q2                                  BIT(6)
+#define ALX_ISR_TX_Q1                                  BIT(5)
+#define ALX_ISR_RFD_UR                                 BIT(4)
+#define ALX_ISR_RXF_OV                                 BIT(3)
+#define ALX_ISR_MANU                                   BIT(2)
+#define ALX_ISR_TIMER                                  BIT(1)
+#define ALX_ISR_SMB                                    BIT(0)
+
+#define ALX_IMR                                                0x1604
+
+/* re-send assert msg if SW does not respond */
+#define ALX_INT_RETRIG                                 0x1608
+/* 40ms */
+#define ALX_INT_RETRIG_TO                              20000
+
+#define ALX_SMB_TIMER                                  0x15C4
+
+#define ALX_TINT_TPD_THRSHLD                           0x15C8
+
+#define ALX_TINT_TIMER                                 0x15CC
+
+#define ALX_CLK_GATE                                   0x1814
+#define ALX_CLK_GATE_RXMAC                             BIT(5)
+#define ALX_CLK_GATE_TXMAC                             BIT(4)
+#define ALX_CLK_GATE_RXQ                               BIT(3)
+#define ALX_CLK_GATE_TXQ                               BIT(2)
+#define ALX_CLK_GATE_DMAR                              BIT(1)
+#define ALX_CLK_GATE_DMAW                              BIT(0)
+#define ALX_CLK_GATE_ALL               (ALX_CLK_GATE_RXMAC | \
+                                        ALX_CLK_GATE_TXMAC | \
+                                        ALX_CLK_GATE_RXQ | \
+                                        ALX_CLK_GATE_TXQ | \
+                                        ALX_CLK_GATE_DMAR | \
+                                        ALX_CLK_GATE_DMAW)
+
+/* interop between drivers */
+#define ALX_DRV                                                0x1804
+#define ALX_DRV_PHY_AUTO                               BIT(28)
+#define ALX_DRV_PHY_1000                               BIT(27)
+#define ALX_DRV_PHY_100                                        BIT(26)
+#define ALX_DRV_PHY_10                                 BIT(25)
+#define ALX_DRV_PHY_DUPLEX                             BIT(24)
+/* bit23: adv Pause */
+#define ALX_DRV_PHY_PAUSE                              BIT(23)
+/* bit22: adv Asym Pause */
+#define ALX_DRV_PHY_MASK                               0xFF
+#define ALX_DRV_PHY_SHIFT                              21
+#define ALX_DRV_PHY_UNKNOWN                            0
+
+/* flag of phy inited */
+#define ALX_PHY_INITED                                 0x003F
+
+/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
+#define ALX_WOL_CTRL2                                  0x1830
+#define ALX_WOL_CTRL2_DATA_STORE                       BIT(3)
+#define ALX_WOL_CTRL2_PTRN_EVT                         BIT(2)
+#define ALX_WOL_CTRL2_PME_PTRN_EN                      BIT(1)
+#define ALX_WOL_CTRL2_PTRN_EN                          BIT(0)
+
+#define ALX_WOL_CTRL3                                  0x1834
+#define ALX_WOL_CTRL3_PTRN_ADDR_MASK                   0xFFFFF
+#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT                  0
+
+#define ALX_WOL_CTRL4                                  0x1838
+#define ALX_WOL_CTRL4_PT15_MATCH                       BIT(31)
+#define ALX_WOL_CTRL4_PT14_MATCH                       BIT(30)
+#define ALX_WOL_CTRL4_PT13_MATCH                       BIT(29)
+#define ALX_WOL_CTRL4_PT12_MATCH                       BIT(28)
+#define ALX_WOL_CTRL4_PT11_MATCH                       BIT(27)
+#define ALX_WOL_CTRL4_PT10_MATCH                       BIT(26)
+#define ALX_WOL_CTRL4_PT9_MATCH                                BIT(25)
+#define ALX_WOL_CTRL4_PT8_MATCH                                BIT(24)
+#define ALX_WOL_CTRL4_PT7_MATCH                                BIT(23)
+#define ALX_WOL_CTRL4_PT6_MATCH                                BIT(22)
+#define ALX_WOL_CTRL4_PT5_MATCH                                BIT(21)
+#define ALX_WOL_CTRL4_PT4_MATCH                                BIT(20)
+#define ALX_WOL_CTRL4_PT3_MATCH                                BIT(19)
+#define ALX_WOL_CTRL4_PT2_MATCH                                BIT(18)
+#define ALX_WOL_CTRL4_PT1_MATCH                                BIT(17)
+#define ALX_WOL_CTRL4_PT0_MATCH                                BIT(16)
+#define ALX_WOL_CTRL4_PT15_EN                          BIT(15)
+#define ALX_WOL_CTRL4_PT14_EN                          BIT(14)
+#define ALX_WOL_CTRL4_PT13_EN                          BIT(13)
+#define ALX_WOL_CTRL4_PT12_EN                          BIT(12)
+#define ALX_WOL_CTRL4_PT11_EN                          BIT(11)
+#define ALX_WOL_CTRL4_PT10_EN                          BIT(10)
+#define ALX_WOL_CTRL4_PT9_EN                           BIT(9)
+#define ALX_WOL_CTRL4_PT8_EN                           BIT(8)
+#define ALX_WOL_CTRL4_PT7_EN                           BIT(7)
+#define ALX_WOL_CTRL4_PT6_EN                           BIT(6)
+#define ALX_WOL_CTRL4_PT5_EN                           BIT(5)
+#define ALX_WOL_CTRL4_PT4_EN                           BIT(4)
+#define ALX_WOL_CTRL4_PT3_EN                           BIT(3)
+#define ALX_WOL_CTRL4_PT2_EN                           BIT(2)
+#define ALX_WOL_CTRL4_PT1_EN                           BIT(1)
+#define ALX_WOL_CTRL4_PT0_EN                           BIT(0)
+
+#define ALX_WOL_CTRL5                                  0x183C
+#define ALX_WOL_CTRL5_PT3_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT3_LEN_SHIFT                    24
+#define ALX_WOL_CTRL5_PT2_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT2_LEN_SHIFT                    16
+#define ALX_WOL_CTRL5_PT1_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT1_LEN_SHIFT                    8
+#define ALX_WOL_CTRL5_PT0_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT0_LEN_SHIFT                    0
+
+#define ALX_WOL_CTRL6                                  0x1840
+#define ALX_WOL_CTRL5_PT7_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT7_LEN_SHIFT                    24
+#define ALX_WOL_CTRL5_PT6_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT6_LEN_SHIFT                    16
+#define ALX_WOL_CTRL5_PT5_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT5_LEN_SHIFT                    8
+#define ALX_WOL_CTRL5_PT4_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT4_LEN_SHIFT                    0
+
+#define ALX_WOL_CTRL7                                  0x1844
+#define ALX_WOL_CTRL5_PT11_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT11_LEN_SHIFT                   24
+#define ALX_WOL_CTRL5_PT10_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT10_LEN_SHIFT                   16
+#define ALX_WOL_CTRL5_PT9_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT9_LEN_SHIFT                    8
+#define ALX_WOL_CTRL5_PT8_LEN_MASK                     0xFF
+#define ALX_WOL_CTRL5_PT8_LEN_SHIFT                    0
+
+#define ALX_WOL_CTRL8                                  0x1848
+#define ALX_WOL_CTRL5_PT15_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT15_LEN_SHIFT                   24
+#define ALX_WOL_CTRL5_PT14_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT14_LEN_SHIFT                   16
+#define ALX_WOL_CTRL5_PT13_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT13_LEN_SHIFT                   8
+#define ALX_WOL_CTRL5_PT12_LEN_MASK                    0xFF
+#define ALX_WOL_CTRL5_PT12_LEN_SHIFT                   0
+
+#define ALX_ACER_FIXED_PTN0                            0x1850
+#define ALX_ACER_FIXED_PTN0_MASK                       0xFFFFFFFF
+#define ALX_ACER_FIXED_PTN0_SHIFT                      0
+
+#define ALX_ACER_FIXED_PTN1                            0x1854
+#define ALX_ACER_FIXED_PTN1_MASK                       0xFFFF
+#define ALX_ACER_FIXED_PTN1_SHIFT                      0
+
+#define ALX_ACER_RANDOM_NUM0                           0x1858
+#define ALX_ACER_RANDOM_NUM0_MASK                      0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM0_SHIFT                     0
+
+#define ALX_ACER_RANDOM_NUM1                           0x185C
+#define ALX_ACER_RANDOM_NUM1_MASK                      0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM1_SHIFT                     0
+
+#define ALX_ACER_RANDOM_NUM2                           0x1860
+#define ALX_ACER_RANDOM_NUM2_MASK                      0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM2_SHIFT                     0
+
+#define ALX_ACER_RANDOM_NUM3                           0x1864
+#define ALX_ACER_RANDOM_NUM3_MASK                      0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM3_SHIFT                     0
+
+#define ALX_ACER_MAGIC                                 0x1868
+#define ALX_ACER_MAGIC_EN                              BIT(31)
+#define ALX_ACER_MAGIC_PME_EN                          BIT(30)
+#define ALX_ACER_MAGIC_MATCH                           BIT(29)
+#define ALX_ACER_MAGIC_FF_CHECK                                BIT(10)
+#define ALX_ACER_MAGIC_RAN_LEN_MASK                    0x1F
+#define ALX_ACER_MAGIC_RAN_LEN_SHIFT                   5
+#define ALX_ACER_MAGIC_FIX_LEN_MASK                    0x1F
+#define ALX_ACER_MAGIC_FIX_LEN_SHIFT                   0
+
+#define ALX_ACER_TIMER                                 0x186C
+#define ALX_ACER_TIMER_EN                              BIT(31)
+#define ALX_ACER_TIMER_PME_EN                          BIT(30)
+#define ALX_ACER_TIMER_MATCH                           BIT(29)
+#define ALX_ACER_TIMER_THRES_MASK                      0x1FFFF
+#define ALX_ACER_TIMER_THRES_SHIFT                     0
+#define ALX_ACER_TIMER_THRES_DEF                       1
+
+/* RSS definitions */
+#define ALX_RSS_KEY0                                   0x14B0
+#define ALX_RSS_KEY1                                   0x14B4
+#define ALX_RSS_KEY2                                   0x14B8
+#define ALX_RSS_KEY3                                   0x14BC
+#define ALX_RSS_KEY4                                   0x14C0
+#define ALX_RSS_KEY5                                   0x14C4
+#define ALX_RSS_KEY6                                   0x14C8
+#define ALX_RSS_KEY7                                   0x14CC
+#define ALX_RSS_KEY8                                   0x14D0
+#define ALX_RSS_KEY9                                   0x14D4
+
+#define ALX_RSS_IDT_TBL0                               0x1B00
+
+#define ALX_MSI_MAP_TBL1                               0x15D0
+#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT                    20
+#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT                    16
+#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT                    12
+#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT                    8
+#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT                    4
+#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT                    0
+
+#define ALX_MSI_MAP_TBL2                               0x15D8
+#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT                    20
+#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT                    16
+#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT                    12
+#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT                    8
+#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT                    4
+#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT                    0
+
+#define ALX_MSI_ID_MAP                                 0x15D4
+
+#define ALX_MSI_RETRANS_TIMER                          0x1920
+/* bit16: 1:line,0:standard */
+#define ALX_MSI_MASK_SEL_LINE                          BIT(16)
+#define ALX_MSI_RETRANS_TM_MASK                                0xFFFF
+#define ALX_MSI_RETRANS_TM_SHIFT                       0
+
+/* CR DMA ctrl */
+
+/* TX QoS */
+#define ALX_WRR                                                0x1938
+#define ALX_WRR_PRI_MASK                               0x3
+#define ALX_WRR_PRI_SHIFT                              29
+#define ALX_WRR_PRI_RESTRICT_NONE                      3
+#define ALX_WRR_PRI3_MASK                              0x1F
+#define ALX_WRR_PRI3_SHIFT                             24
+#define ALX_WRR_PRI2_MASK                              0x1F
+#define ALX_WRR_PRI2_SHIFT                             16
+#define ALX_WRR_PRI1_MASK                              0x1F
+#define ALX_WRR_PRI1_SHIFT                             8
+#define ALX_WRR_PRI0_MASK                              0x1F
+#define ALX_WRR_PRI0_SHIFT                             0
+
+#define ALX_HQTPD                                      0x193C
+#define ALX_HQTPD_BURST_EN                             BIT(31)
+#define ALX_HQTPD_Q3_NUMPREF_MASK                      0xF
+#define ALX_HQTPD_Q3_NUMPREF_SHIFT                     8
+#define ALX_HQTPD_Q2_NUMPREF_MASK                      0xF
+#define ALX_HQTPD_Q2_NUMPREF_SHIFT                     4
+#define ALX_HQTPD_Q1_NUMPREF_MASK                      0xF
+#define ALX_HQTPD_Q1_NUMPREF_SHIFT                     0
+
+#define ALX_MISC                                       0x19C0
+#define ALX_MISC_PSW_OCP_MASK                          0x7
+#define ALX_MISC_PSW_OCP_SHIFT                         21
+#define ALX_MISC_PSW_OCP_DEF                           0x7
+#define ALX_MISC_ISO_EN                                        BIT(12)
+#define ALX_MISC_INTNLOSC_OPEN                         BIT(3)
+
+#define ALX_MSIC2                                      0x19C8
+#define ALX_MSIC2_CALB_START                           BIT(0)
+
+#define ALX_MISC3                                      0x19CC
+/* bit1: 1:Software control 25M */
+#define ALX_MISC3_25M_BY_SW                            BIT(1)
+/* bit0: 25M switch to intnl OSC */
+#define ALX_MISC3_25M_NOTO_INTNL                       BIT(0)
+
+/* MSIX tbl in memory space */
+#define ALX_MSIX_ENTRY_BASE                            0x2000
+
+/********************* PHY regs definition ***************************/
+
+/* PHY Specific Status Register */
+#define ALX_MII_GIGA_PSSR                              0x11
+#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED                        0x0800
+#define ALX_GIGA_PSSR_DPLX                             0x2000
+#define ALX_GIGA_PSSR_SPEED                            0xC000
+#define ALX_GIGA_PSSR_10MBS                            0x0000
+#define ALX_GIGA_PSSR_100MBS                           0x4000
+#define ALX_GIGA_PSSR_1000MBS                          0x8000
+
+/* PHY Interrupt Enable Register */
+#define ALX_MII_IER                                    0x12
+#define ALX_IER_LINK_UP                                        0x0400
+#define ALX_IER_LINK_DOWN                              0x0800
+
+/* PHY Interrupt Status Register */
+#define ALX_MII_ISR                                    0x13
+
+#define ALX_MII_DBG_ADDR                               0x1D
+#define ALX_MII_DBG_DATA                               0x1E
+
+/***************************** debug port *************************************/
+
+#define ALX_MIIDBG_ANACTRL                             0x00
+#define ALX_ANACTRL_DEF                                        0x02EF
+
+#define ALX_MIIDBG_SYSMODCTRL                          0x04
+/* en half bias */
+#define ALX_SYSMODCTRL_IECHOADJ_DEF                    0xBB8B
+
+#define ALX_MIIDBG_SRDSYSMOD                           0x05
+#define ALX_SRDSYSMOD_DEEMP_EN                         0x0040
+#define ALX_SRDSYSMOD_DEF                              0x2C46
+
+#define ALX_MIIDBG_HIBNEG                              0x0B
+#define ALX_HIBNEG_PSHIB_EN                            0x8000
+#define ALX_HIBNEG_HIB_PSE                             0x1000
+#define ALX_HIBNEG_DEF                                 0xBC40
+#define ALX_HIBNEG_NOHIB       (ALX_HIBNEG_DEF & \
+                                ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
+
+#define ALX_MIIDBG_TST10BTCFG                          0x12
+#define ALX_TST10BTCFG_DEF                             0x4C04
+
+#define ALX_MIIDBG_AZ_ANADECT                          0x15
+#define ALX_AZ_ANADECT_DEF                             0x3220
+#define ALX_AZ_ANADECT_LONG                            0x3210
+
+#define ALX_MIIDBG_MSE16DB                             0x18
+#define ALX_MSE16DB_UP                                 0x05EA
+#define ALX_MSE16DB_DOWN                               0x02EA
+
+#define ALX_MIIDBG_MSE20DB                             0x1C
+#define ALX_MSE20DB_TH_MASK                            0x7F
+#define ALX_MSE20DB_TH_SHIFT                           2
+#define ALX_MSE20DB_TH_DEF                             0x2E
+#define ALX_MSE20DB_TH_HI                              0x54
+
+#define ALX_MIIDBG_AGC                                 0x23
+#define ALX_AGC_2_VGA_MASK                             0x3FU
+#define ALX_AGC_2_VGA_SHIFT                            8
+#define ALX_AGC_LONG1G_LIMT                            40
+#define ALX_AGC_LONG100M_LIMT                          44
+
+#define ALX_MIIDBG_LEGCYPS                             0x29
+#define ALX_LEGCYPS_EN                                 0x8000
+#define ALX_LEGCYPS_DEF                                        0x129D
+
+#define ALX_MIIDBG_TST100BTCFG                         0x36
+#define ALX_TST100BTCFG_DEF                            0xE12C
+
+#define ALX_MIIDBG_GREENCFG                            0x3B
+#define ALX_GREENCFG_DEF                               0x7078
+
+#define ALX_MIIDBG_GREENCFG2                           0x3D
+#define ALX_GREENCFG2_BP_GREEN                         0x8000
+#define ALX_GREENCFG2_GATE_DFSE_EN                     0x0080
+
+/******* dev 3 *********/
+#define ALX_MIIEXT_PCS                                 3
+
+#define ALX_MIIEXT_CLDCTRL3                            0x8003
+#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT                        0x8000
+
+#define ALX_MIIEXT_CLDCTRL5                            0x8005
+#define ALX_CLDCTRL5_BP_VD_HLFBIAS                     0x4000
+
+#define ALX_MIIEXT_CLDCTRL6                            0x8006
+#define ALX_CLDCTRL6_CAB_LEN_MASK                      0xFF
+#define ALX_CLDCTRL6_CAB_LEN_SHIFT                     0
+#define ALX_CLDCTRL6_CAB_LEN_SHORT1G                   116
+#define ALX_CLDCTRL6_CAB_LEN_SHORT100M                 152
+
+#define ALX_MIIEXT_VDRVBIAS                            0x8062
+#define ALX_VDRVBIAS_DEF                               0x3
+
+/********* dev 7 **********/
+#define ALX_MIIEXT_ANEG                                        7
+
+#define ALX_MIIEXT_LOCAL_EEEADV                                0x3C
+#define ALX_LOCAL_EEEADV_1000BT                                0x0004
+#define ALX_LOCAL_EEEADV_100BT                         0x0002
+
+#define ALX_MIIEXT_AFE                                 0x801A
+#define ALX_AFE_10BT_100M_TH                           0x0040
+
+#define ALX_MIIEXT_S3DIG10                             0x8023
+/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
+#define ALX_MIIEXT_S3DIG10_SL                          0x0001
+#define ALX_MIIEXT_S3DIG10_DEF                         0
+
+#define ALX_MIIEXT_NLP78                               0x8027
+#define ALX_MIIEXT_NLP78_120M_DEF                      0x8A05
+
+#endif
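
The register, mask and shift macros above are meant to be combined into single 32-bit command words. As an illustration only, the sketch below shows how a clause-22 PHY read might be composed from the ALX_MDIO_* fields; the function name, the use of readl()/writel() and the poll/timeout behaviour are assumptions made for the example, not the driver's actual accessors.

	/* Hedged sketch: issue a PHY read through the ALX_MDIO register. */
	static int alx_mdio_read_sketch(void __iomem *hw, u16 reg, u16 *val)
	{
		u32 cmd;
		int i;

		cmd = ALX_MDIO_SPRES_PRMBL |
		      ALX_MDIO_CLK_SEL_25MD4 << ALX_MDIO_CLK_SEL_SHIFT |
		      (reg & ALX_MDIO_REG_MASK) << ALX_MDIO_REG_SHIFT |
		      ALX_MDIO_OP_READ | ALX_MDIO_START;
		writel(cmd, hw + ALX_MDIO);

		/* poll until the hardware clears the BUSY bit */
		for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
			cmd = readl(hw + ALX_MDIO);
			if (!(cmd & ALX_MDIO_BUSY)) {
				*val = (cmd >> ALX_MDIO_DATA_SHIFT) &
				       ALX_MDIO_DATA_MASK;
				return 0;
			}
			udelay(10);
		}
		return -EIO;
	}
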
index c777b9013164ad7562106340a877e126cc78a175..a13463e8a2c3401ac4f02715dc44857ed330b5e5 100644 (file)
@@ -744,6 +744,9 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
+               if (pci_channel_offline(tp->pdev))
+                       break;
+
                udelay(10);
        }
 
@@ -1635,6 +1638,9 @@ static void tg3_wait_for_event_ack(struct tg3 *tp)
        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
+               if (pci_channel_offline(tp->pdev))
+                       break;
+
                udelay(8);
        }
 }
@@ -1813,6 +1819,9 @@ static int tg3_poll_fw(struct tg3 *tp)
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
+                       if (pci_channel_offline(tp->pdev))
+                               return -ENODEV;
+
                        udelay(100);
                }
                return -ENODEV;
@@ -1823,6 +1832,15 @@ static int tg3_poll_fw(struct tg3 *tp)
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
+               if (pci_channel_offline(tp->pdev)) {
+                       if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
+                               tg3_flag_set(tp, NO_FWARE_REPORTED);
+                               netdev_info(tp->dev, "No firmware running\n");
+                       }
+
+                       break;
+               }
+
                udelay(10);
        }
 
@@ -3520,6 +3538,8 @@ static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
                        break;
+               if (pci_channel_offline(tp->pdev))
+                       return -EBUSY;
        }
 
        return (i == iters) ? -EBUSY : 0;
@@ -8589,6 +8609,14 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, boo
        tw32_f(ofs, val);
 
        for (i = 0; i < MAX_WAIT_CNT; i++) {
+               if (pci_channel_offline(tp->pdev)) {
+                       dev_err(&tp->pdev->dev,
+                               "tg3_stop_block device offline, "
+                               "ofs=%lx enable_bit=%x\n",
+                               ofs, enable_bit);
+                       return -ENODEV;
+               }
+
                udelay(100);
                val = tr32(ofs);
                if ((val & enable_bit) == 0)
@@ -8612,6 +8640,13 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
 
        tg3_disable_ints(tp);
 
+       if (pci_channel_offline(tp->pdev)) {
+               tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
+               tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+               err = -ENODEV;
+               goto err_no_dev;
+       }
+
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);
@@ -8660,6 +8695,7 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 
+err_no_dev:
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status)
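
Every tg3 hunk above applies the same guard: register-polling loops now also break out as soon as pci_channel_offline() reports that the device has disappeared from the bus (for example during PCI error recovery), instead of spinning through the full timeout against a dead device. In condensed, hypothetical form (condition_met() stands in for whichever status test each loop performs):

	for (i = 0; i < max_iters; i++) {
		if (condition_met(tp))
			break;
		if (pci_channel_offline(tp->pdev))
			break;	/* device gone, don't wait out the timeout */
		udelay(10);
	}
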
index 6e8bc9d88c418c5eff9838948303c16c71019cc1..94d957d203a6466b9b809d49962f6630ed2bc7cb 100644 (file)
@@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
                file->f_pos += offset;
                break;
        case 2:
-               file->f_pos = debug->buffer_len - offset;
+               file->f_pos = debug->buffer_len + offset;
                break;
        default:
                return -EINVAL;
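
With whence == 2 (SEEK_END) the offset is taken relative to the end of the buffer and is normally zero or negative, so the new position is size + offset. For a 100-byte debug buffer, lseek(fd, -10, SEEK_END) should land at 100 + (-10) = 90; the previous expression would have produced 110.
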
index a667015be22ada3a3bb85628620584e4fb1982e3..d48099f03b7f619b15959a4f4fd2b4db2b491805 100644 (file)
@@ -516,6 +516,7 @@ fec_restart(struct net_device *ndev, int duplex)
        /* Set MII speed */
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
+#if !defined(CONFIG_M5272)
        /* set RX checksum */
        val = readl(fep->hwp + FEC_RACC);
        if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -523,6 +524,7 @@ fec_restart(struct net_device *ndev, int duplex)
        else
                val &= ~FEC_RACC_OPTIONS;
        writel(val, fep->hwp + FEC_RACC);
+#endif
 
        /*
         * The phy interface and speed need to get configured
@@ -575,6 +577,7 @@ fec_restart(struct net_device *ndev, int duplex)
 #endif
        }
 
+#if !defined(CONFIG_M5272)
        /* enable pause frame*/
        if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
            ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
@@ -592,6 +595,7 @@ fec_restart(struct net_device *ndev, int duplex)
        } else {
                rcntl &= ~FEC_ENET_FCE;
        }
+#endif /* !defined(CONFIG_M5272) */
 
        writel(rcntl, fep->hwp + FEC_R_CNTRL);
 
@@ -1205,7 +1209,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        /* mask with MAC supported features */
        if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
                phy_dev->supported &= PHY_GBIT_FEATURES;
+#if !defined(CONFIG_M5272)
                phy_dev->supported |= SUPPORTED_Pause;
+#endif
        }
        else
                phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1390,6 +1396,8 @@ static int fec_enet_get_ts_info(struct net_device *ndev,
        }
 }
 
+#if !defined(CONFIG_M5272)
+
 static void fec_enet_get_pauseparam(struct net_device *ndev,
                                    struct ethtool_pauseparam *pause)
 {
@@ -1436,9 +1444,13 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
        return 0;
 }
 
+#endif /* !defined(CONFIG_M5272) */
+
 static const struct ethtool_ops fec_enet_ethtool_ops = {
+#if !defined(CONFIG_M5272)
        .get_pauseparam         = fec_enet_get_pauseparam,
        .set_pauseparam         = fec_enet_set_pauseparam,
+#endif
        .get_settings           = fec_enet_get_settings,
        .set_settings           = fec_enet_set_settings,
        .get_drvinfo            = fec_enet_get_drvinfo,
@@ -1874,10 +1886,12 @@ fec_probe(struct platform_device *pdev)
        /* setup board info structure */
        fep = netdev_priv(ndev);
 
+#if !defined(CONFIG_M5272)
        /* default enable pause frame auto negotiation */
        if (pdev->id_entry &&
            (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
                fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+#endif
 
        fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
        fep->pdev = pdev;
index 2ad1494efbb3021e796481da6c4f48f21e63ca30..d1cbfb12c1ca35cb9f0c0521a4b38bb847cbd199 100644 (file)
@@ -1757,7 +1757,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
        memset(rxq->rx_desc_area, 0, size);
 
        rxq->rx_desc_area_size = size;
-       rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+       rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
                                    GFP_KERNEL);
        if (rxq->rx_skb == NULL)
                goto out_free;
index 339bb323cb0c99c91bf2741cbfea918e7ae4499d..1c8af8ba08d92fdfe62e7ace6288d2b823823c67 100644 (file)
@@ -1015,7 +1015,7 @@ static int rxq_init(struct net_device *dev)
        int rx_desc_num = pep->rx_ring_size;
 
        /* Allocate RX skb rings */
-       pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+       pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
                             GFP_KERNEL);
        if (!pep->rx_skb)
                return -ENOMEM;
@@ -1076,7 +1076,7 @@ static int txq_init(struct net_device *dev)
        int size = 0, i = 0;
        int tx_desc_num = pep->tx_ring_size;
 
-       pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+       pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
                             GFP_KERNEL);
        if (!pep->tx_skb)
                return -ENOMEM;
index 2f4a26039e801270b5d634abe43bc6de5ab3309f..8a434997a0df38a5266e40d7238b55956499458c 100644 (file)
@@ -632,6 +632,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                dev->caps.cqe_size   = 32;
        }
 
+       dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
+       mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+
        slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
 
        return 0;
index 921729f9c85c53baa17cd334f4a7b8f35f590561..91a8a5d280379e58f4f0634fb482bb6388e6a4b8 100644 (file)
 union mgmt_port_ring_entry {
        u64 d64;
        struct {
-               u64    reserved_62_63:2;
+#define RING_ENTRY_CODE_DONE 0xf
+#define RING_ENTRY_CODE_MORE 0x10
+#ifdef __BIG_ENDIAN_BITFIELD
+               u64 reserved_62_63:2;
                /* Length of the buffer/packet in bytes */
-               u64    len:14;
+               u64 len:14;
                /* For TX, signals that the packet should be timestamped */
-               u64    tstamp:1;
+               u64 tstamp:1;
                /* The RX error code */
-               u64    code:7;
-#define RING_ENTRY_CODE_DONE 0xf
-#define RING_ENTRY_CODE_MORE 0x10
+               u64 code:7;
                /* Physical address of the buffer */
-               u64    addr:40;
+               u64 addr:40;
+#else
+               u64 addr:40;
+               u64 code:7;
+               u64 tstamp:1;
+               u64 len:14;
+               u64 reserved_62_63:2;
+#endif
        } s;
 };
 
@@ -1141,10 +1149,13 @@ static int octeon_mgmt_open(struct net_device *netdev)
                /* For compensation state to lock. */
                ndelay(1040 * NS_PER_PHY_CLK);
 
-               /* Some Ethernet switches cannot handle standard
-                * Interframe Gap, increase to 16 bytes.
+               /* Default Interframe Gaps are too small.  The recommended
+                * workaround is:
+                *
+                * AGL_GMX_TX_IFG[IFG1]=14
+                * AGL_GMX_TX_IFG[IFG2]=10
                 */
-               cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
+               cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
        }
 
        octeon_mgmt_rx_fill_ring(netdev);
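
The new value 0xae follows from the two gaps named in the comment if IFG1 occupies the low nibble of AGL_GMX_TX_IFG and IFG2 the next one, an assumed layout that is at least consistent with the old value 0x88 encoding an 8 + 8 byte gap:

	(10 << 4) | 14 == 0xa0 | 0x0e == 0xae	/* IFG2 = 10, IFG1 = 14 */
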
index 43562c256379539b8f739d4b72950886c067b75e..6acf82b9f01894d7c7e3e885e6f5cbc5760f3797 100644 (file)
@@ -642,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
                                qlcnic_83xx_config_intrpt(adapter, 0);
                }
                /* Allow dma queues to drain after context reset */
-               msleep(20);
+               mdelay(20);
        }
 }
 
index 5e3982fc539869be125fc36c1d2cefd4a907469b..e29fe8dbd226a2ddf48963a653f98c6566588270 100644 (file)
@@ -380,8 +380,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesipr_value   = 0x01ff009f,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-                         EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+                         EESR_ECI,
        .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
        .apr            = 1,
@@ -427,8 +428,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-                         EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+                         EESR_ECI,
        .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
        .apr            = 1,
@@ -478,8 +480,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .rmcr_value     = 0x00000001,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-                         EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+                         EESR_ECI,
        .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
        .apr            = 1,
@@ -592,9 +595,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
-       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-                         EESR_ECI,
+       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+                         EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+                         EESR_TDE | EESR_ECI,
        .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
                          EESR_TFE,
        .fdr_value      = 0x0000072f,
@@ -674,9 +677,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
-       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-                         EESR_ECI,
+       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+                         EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+                         EESR_TDE | EESR_ECI,
        .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
                          EESR_TFE,
 
@@ -811,9 +814,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
-       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-                         EESR_ECI,
+       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+                         EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+                         EESR_TDE | EESR_ECI,
        .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
                          EESR_TFE,
 
@@ -1549,11 +1552,12 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 
 ignore_link:
        if (intr_status & EESR_TWB) {
-               /* Write buck end. unused write back interrupt */
-               if (intr_status & EESR_TABT)    /* Transmit Abort int */
+               /* Unused write back interrupt */
+               if (intr_status & EESR_TABT) {  /* Transmit Abort int */
                        ndev->stats.tx_aborted_errors++;
                        if (netif_msg_tx_err(mdp))
                                dev_err(&ndev->dev, "Transmit Abort\n");
+               }
        }
 
        if (intr_status & EESR_RABT) {
index 1ddc9f235bcb393cd915764764eb3ab53f313e61..62689a5823be3461285a17a2257b055bc250336f 100644 (file)
@@ -253,7 +253,7 @@ enum EESR_BIT {
 
 #define DEFAULT_TX_CHECK       (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
                                 EESR_RTO)
-#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
+#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
                                 EESR_RDE | EESR_RFRMER | EESR_ADE | \
                                 EESR_TFE | EESR_TDE | EESR_ECI)
 #define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
index 39e4cb39de29493d30079f3ddcfc81e46cfbf5b4..4a14a940c65e70f052a83bd7384722a4dbe4e768 100644 (file)
@@ -2139,7 +2139,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
        return sprintf(buf, "%d\n", efx->phy_type);
 }
-static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
+static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
 
 static int efx_register_netdev(struct efx_nic *efx)
 {
index 7788fbe44f0ad35eee37a701733f3576cec271e4..95176979b2d2e046f63bbe9b8f83b0c8655b60d7 100644 (file)
@@ -297,8 +297,8 @@ struct dma_features {
 #define MAC_RNABLE_RX          0x00000004      /* Receiver Enable */
 
 /* Default LPI timers */
-#define STMMAC_DEFAULT_LIT_LS_TIMER    0x3E8
-#define STMMAC_DEFAULT_TWT_LS_TIMER    0x0
+#define STMMAC_DEFAULT_LIT_LS  0x3E8
+#define STMMAC_DEFAULT_TWT_LS  0x0
 
 #define STMMAC_CHAIN_MODE      0x1
 #define STMMAC_RING_MODE       0x2
index ee919ca8b8a0ad5828e84d7522d03218895d9b79..e9eab29db7beb96bdcb9c740f9746726f9c0206b 100644 (file)
@@ -130,7 +130,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
-#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
 
 /* By default the driver will use the ring mode to manage tx and rx descriptors
  * but passing this value so user can force to use the chain instead of the ring
@@ -288,7 +288,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
        struct stmmac_priv *priv = (struct stmmac_priv *)arg;
 
        stmmac_enable_eee_mode(priv);
-       mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+       mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 }
 
 /**
@@ -304,22 +304,34 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 {
        bool ret = false;
 
+       /* Using PCS we cannot deal with the phy registers at this stage,
+        * so we do not support extra features like EEE.
+        */
+       if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
+           (priv->pcs == STMMAC_PCS_RTBI))
+               goto out;
+
        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                /* Check if the PHY supports EEE */
                if (phy_init_eee(priv->phydev, 1))
                        goto out;
 
-               priv->eee_active = 1;
-               init_timer(&priv->eee_ctrl_timer);
-               priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
-               priv->eee_ctrl_timer.data = (unsigned long)priv;
-               priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
-               add_timer(&priv->eee_ctrl_timer);
-
-               priv->hw->mac->set_eee_timer(priv->ioaddr,
-                                            STMMAC_DEFAULT_LIT_LS_TIMER,
-                                            priv->tx_lpi_timer);
+               if (!priv->eee_active) {
+                       priv->eee_active = 1;
+                       init_timer(&priv->eee_ctrl_timer);
+                       priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
+                       priv->eee_ctrl_timer.data = (unsigned long)priv;
+                       priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
+                       add_timer(&priv->eee_ctrl_timer);
+
+                       priv->hw->mac->set_eee_timer(priv->ioaddr,
+                                                    STMMAC_DEFAULT_LIT_LS,
+                                                    priv->tx_lpi_timer);
+               } else
+                       /* Set HW EEE according to the speed */
+                       priv->hw->mac->set_eee_pls(priv->ioaddr,
+                                                  priv->phydev->link);
 
                pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
 
@@ -329,20 +341,6 @@ out:
        return ret;
 }
 
-/**
- * stmmac_eee_adjust: adjust HW EEE according to the speed
- * @priv: driver private structure
- * Description:
- *     When the EEE has been already initialised we have to
- *     modify the PLS bit in the LPI ctrl & status reg according
- *     to the PHY link status. For this reason.
- */
-static void stmmac_eee_adjust(struct stmmac_priv *priv)
-{
-       if (priv->eee_enabled)
-               priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
-}
-
 /* stmmac_get_tx_hwtstamp: get HW TX timestamps
  * @priv: driver private structure
  * @entry : descriptor index to be used.
@@ -769,7 +767,10 @@ static void stmmac_adjust_link(struct net_device *dev)
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
 
-       stmmac_eee_adjust(priv);
+       /* At this stage it may be necessary to set up the EEE or adjust some
+        * MAC-related HW registers.
+        */
+       priv->eee_enabled = stmmac_eee_init(priv);
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1277,7 +1278,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 
        if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
                stmmac_enable_eee_mode(priv);
-               mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+               mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
        }
        spin_unlock(&priv->tx_lock);
 }
@@ -1671,14 +1672,9 @@ static int stmmac_open(struct net_device *dev)
        if (priv->phydev)
                phy_start(priv->phydev);
 
-       priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
+       priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
-       /* Using PCS we cannot dial with the phy registers at this stage
-        * so we do not support extra feature like EEE.
-        */
-       if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
-           priv->pcs != STMMAC_PCS_RTBI)
-               priv->eee_enabled = stmmac_eee_init(priv);
+       priv->eee_enabled = stmmac_eee_init(priv);
 
        stmmac_init_tx_coalesce(priv);
 
index 21a5b291b4b39d0ae65dcd8766c9e51a25665dee..d1a769f35f9d284f852e1001c87434ae9e96180a 100644 (file)
@@ -1679,7 +1679,7 @@ static int cpsw_probe(struct platform_device *pdev)
        priv->rx_packet_max = max(rx_packet_max, 128);
        priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
        priv->irq_enabled = true;
-       if (!ndev) {
+       if (!priv->cpts) {
                pr_err("error allocating cpts\n");
                goto clean_ndev_ret;
        }
@@ -1973,9 +1973,12 @@ static int cpsw_suspend(struct device *dev)
 {
        struct platform_device  *pdev = to_platform_device(dev);
        struct net_device       *ndev = platform_get_drvdata(pdev);
+       struct cpsw_priv        *priv = netdev_priv(ndev);
 
        if (netif_running(ndev))
                cpsw_ndo_stop(ndev);
+       soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset);
+       soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
        pm_runtime_put_sync(&pdev->dev);
 
        return 0;
index 49dfd592ac1ecd4e0a86d8d5ad01572a97e5c346..053c84fd085313d4bd6487a62a33f17e812cc28b 100644 (file)
@@ -705,6 +705,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
        }
 
        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+       ret = dma_mapping_error(ctlr->dev, buffer);
+       if (ret) {
+               cpdma_desc_free(ctlr->pool, desc, 1);
+               ret = -EINVAL;
+               goto unlock_ret;
+       }
+
        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, directed);
 
index ab2307b5d9a7c881cff59aabdc35b20823d14cd9..4dccead586bee57a076be015478809ea82e7157e 100644 (file)
@@ -285,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 
        skb->protocol = eth_type_trans(skb, net);
        skb->ip_summed = CHECKSUM_NONE;
-       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci);
+       if (packet->vlan_tci & VLAN_TAG_PRESENT)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      packet->vlan_tci);
 
        net->stats.rx_packets++;
        net->stats.rx_bytes += packet->total_data_buflen;
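The hunk above makes the netvsc receive path attach a hardware-accelerated VLAN tag only when the host actually supplied one. A minimal standalone sketch of the pattern against the 3.11-era VLAN API (VLAN_TAG_PRESENT still exists in this series; the function name is hypothetical, not the driver code):

#include <linux/if_vlan.h>

/* Attach an accelerated VLAN tag only when the rx metadata carried one;
 * untagged frames have no VLAN_TAG_PRESENT bit set in vlan_tci. */
static void rx_set_vlan_if_present(struct sk_buff *skb, u16 vlan_tci)
{
	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
}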
index 59e9605de316809a702951a5c6de3dbee3a43a69..b6dd6a75919a69216964b2743ff14c60de460e20 100644 (file)
@@ -524,8 +524,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                        return -EMSGSIZE;
                num_pages = get_user_pages_fast(base, size, 0, &page[i]);
                if (num_pages != size) {
-                       for (i = 0; i < num_pages; i++)
-                               put_page(page[i]);
+                       int j;
+
+                       for (j = 0; j < num_pages; j++)
+                               put_page(page[i + j]);
                        return -EFAULT;
                }
                truesize = size * PAGE_SIZE;
index bfa9bb48e42d5cfc06e299e64809e29ea3097b5b..9c61f8734a40c09e950505184e39232ead9fa938 100644 (file)
@@ -1010,8 +1010,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                        return -EMSGSIZE;
                num_pages = get_user_pages_fast(base, size, 0, &page[i]);
                if (num_pages != size) {
-                       for (i = 0; i < num_pages; i++)
-                               put_page(page[i]);
+                       int j;
+
+                       for (j = 0; j < num_pages; j++)
+                               put_page(page[i + j]);
                        return -EFAULT;
                }
                truesize = size * PAGE_SIZE;
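Both zerocopy_sg_from_iovec hunks above apply the same fix: when get_user_pages_fast() pins fewer pages than requested, release only the references that were actually taken, starting at the offset for this segment, instead of re-walking from index 0 with the wrong counter. A minimal sketch of the pattern, assuming the 3.11-era get_user_pages_fast() signature (surrounding names hypothetical):

#include <linux/mm.h>

/* Pin 'size' user pages starting at 'base' into page[i..]; on a short pin,
 * drop only the references that were actually obtained before failing. */
static int pin_segment(unsigned long base, int size, struct page **page, int i)
{
	int num_pages = get_user_pages_fast(base, size, 0, &page[i]);

	if (num_pages != size) {
		int j;

		/* num_pages may be smaller than size, or negative on error */
		for (j = 0; j < num_pages; j++)
			put_page(page[i + j]);
		return -EFAULT;
	}
	return 0;
}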
index d095d0d3056b82e05df3daddae6fe0d10bb10564..56459215a22b0b099f01af8218c10ef3ece5d184 100644 (file)
@@ -590,7 +590,13 @@ static const struct usb_device_id products[] = {
        {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},    /* HP un2400 Gobi Modem Device */
        {QMI_GOBI1K_DEVICE(0x04da, 0x250d)},    /* Panasonic Gobi Modem device */
        {QMI_GOBI1K_DEVICE(0x413c, 0x8172)},    /* Dell Gobi Modem device */
-       {QMI_GOBI1K_DEVICE(0x1410, 0xa001)},    /* Novatel Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa001)},    /* Novatel/Verizon USB-1000 */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa002)},    /* Novatel Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa003)},    /* Novatel Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa004)},    /* Novatel Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa005)},    /* Novatel Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa006)},    /* Novatel Gobi Modem device */
+       {QMI_GOBI1K_DEVICE(0x1410, 0xa007)},    /* Novatel Gobi Modem device */
        {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)},    /* Asus Gobi Modem device */
        {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)},    /* ONDA Gobi Modem device */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)},    /* Generic Gobi Modem device */
index 3b1d2ee7156b00195376c674f605f5749a2daf64..57325f356d4f125421bfa297a4beb2302c5f194b 100644 (file)
@@ -565,18 +565,22 @@ skip:
 
 /* Watch incoming packets to learn mapping between Ethernet address
  * and Tunnel endpoint.
+ * Return true if packet is bogus and should be dropped.
  */
-static void vxlan_snoop(struct net_device *dev,
+static bool vxlan_snoop(struct net_device *dev,
                        __be32 src_ip, const u8 *src_mac)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f;
-       int err;
 
        f = vxlan_find_mac(vxlan, src_mac);
        if (likely(f)) {
                if (likely(f->remote.remote_ip == src_ip))
-                       return;
+                       return false;
+
+               /* Don't migrate static entries, drop packets */
+               if (f->state & NUD_NOARP)
+                       return true;
 
                if (net_ratelimit())
                        netdev_info(dev,
@@ -588,14 +592,19 @@ static void vxlan_snoop(struct net_device *dev,
        } else {
                /* learned new entry */
                spin_lock(&vxlan->hash_lock);
-               err = vxlan_fdb_create(vxlan, src_mac, src_ip,
-                                      NUD_REACHABLE,
-                                      NLM_F_EXCL|NLM_F_CREATE,
-                                      vxlan->dst_port,
-                                      vxlan->default_dst.remote_vni,
-                                      0, NTF_SELF);
+
+               /* close off race between vxlan_flush and incoming packets */
+               if (netif_running(dev))
+                       vxlan_fdb_create(vxlan, src_mac, src_ip,
+                                        NUD_REACHABLE,
+                                        NLM_F_EXCL|NLM_F_CREATE,
+                                        vxlan->dst_port,
+                                        vxlan->default_dst.remote_vni,
+                                        0, NTF_SELF);
                spin_unlock(&vxlan->hash_lock);
        }
+
+       return false;
 }
 
 
@@ -727,8 +736,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                               vxlan->dev->dev_addr) == 0)
                goto drop;
 
-       if (vxlan->flags & VXLAN_F_LEARN)
-               vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
+       if ((vxlan->flags & VXLAN_F_LEARN) &&
+           vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
+               goto drop;
 
        __skb_tunnel_rx(skb, vxlan->dev);
        skb_reset_network_header(skb);
@@ -1151,9 +1161,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
                struct sk_buff *skb1;
 
                skb1 = skb_clone(skb, GFP_ATOMIC);
-               rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
-               if (rc == NETDEV_TX_OK)
-                       rc = rc1;
+               if (skb1) {
+                       rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+                       if (rc == NETDEV_TX_OK)
+                               rc = rc1;
+               }
        }
 
        rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
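The transmit hunk above guards against skb_clone() failing under memory pressure: the extra copy is only sent when the clone was actually allocated. A minimal sketch of the idea, with hypothetical names rather than the vxlan code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit an extra copy of skb only if the clone could be allocated;
 * skb_clone() returns NULL when memory is tight. */
static void xmit_extra_copy(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb1 = skb_clone(skb, GFP_ATOMIC);

	if (skb1) {
		skb1->dev = dev;
		dev_queue_xmit(skb1);
	}
	/* else: drop the copy rather than dereference a NULL pointer */
}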
index 147614ed86aa8ae5d4a8a5451304337a70bb0960..6a8a382c5f4c071f070c88f618296b1b9d6624df 100644 (file)
@@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci)
        struct frad_local       *flp;
        struct net_device       *master, *slave;
        int                     err;
+       bool                    found = false;
+
+       rtnl_lock();
 
        /* validate slave device */
        master = __dev_get_by_name(&init_net, dlci->devname);
-       if (!master)
-               return -ENODEV;
+       if (!master) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       list_for_each_entry(dlp, &dlci_devs, list) {
+               if (dlp->master == master) {
+                       found = true;
+                       break;
+               }
+       }
+       if (!found) {
+               err = -ENODEV;
+               goto out;
+       }
 
        if (netif_running(master)) {
-               return -EBUSY;
+               err = -EBUSY;
+               goto out;
        }
 
        dlp = netdev_priv(master);
        slave = dlp->slave;
        flp = netdev_priv(slave);
 
-       rtnl_lock();
        err = (*flp->deassoc)(slave, master);
        if (!err) {
                list_del(&dlp->list);
@@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci)
 
                dev_put(slave);
        }
+out:
        rtnl_unlock();
-
        return err;
 }
 
index 0743a47cef8f3c96b18b8a5bfa09fdd380cccc47..62f1b7636c9246c1d7a77b7c7bf28f8db2f17a6b 100644 (file)
@@ -1174,7 +1174,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
                mutex_lock(&priv->htc_pm_lock);
 
                priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-               if (priv->ps_idle)
+               if (!priv->ps_idle)
                        chip_reset = true;
 
                mutex_unlock(&priv->htc_pm_lock);
index 1c9b1bac8b0d4795b4226f5944764c4dae3bbdba..83ab6be3fe6d502178b3253943bdd0097f4f8cbe 100644 (file)
@@ -1570,6 +1570,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                return;
 
+       rcu_read_lock();
+
        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
 
@@ -1608,8 +1610,10 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 
                if (ac == last_ac ||
                    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
-                       return;
+                       break;
        }
+
+       rcu_read_unlock();
 }
 
 /***********/
index b98f2235978e80269be1861b26ba019398591bae..2c593570497ceaa6a4f430764bddbe1b6bd4f651 100644 (file)
@@ -930,6 +930,10 @@ fail:
                        brcmf_fws_del_interface(ifp);
                        brcmf_fws_deinit(drvr);
                }
+               if (drvr->iflist[0]) {
+                       free_netdev(ifp->ndev);
+                       drvr->iflist[0] = NULL;
+               }
                if (p2p_ifp) {
                        free_netdev(p2p_ifp->ndev);
                        drvr->iflist[1] = NULL;
index 28e7aeedd184c4cc942ec67259dc978ca542794e..9fd6f2fef11bfe546126c86ec857507d8fe8d9eb 100644 (file)
@@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
  */
 static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
 {
-       /* disallow PS when one of the following global conditions meets */
-       if (!wlc->pub->associated)
-               return false;
-
-       /* disallow PS when one of these meets when not scanning */
-       if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
-               return false;
-
-       if (wlc->bsscfg->type == BRCMS_TYPE_AP)
-               return false;
-
-       if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
-               return false;
-
-       return true;
+       /* not supporting PS so always return false for now */
+       return false;
 }
 
 static void brcms_c_statsupd(struct brcms_c_info *wlc)
index c9f197d9ca1eecddf8d6f651c4f8598950986502..fe31590a51b2d81b41b40b2f862c96c5b2e10980 100644 (file)
@@ -816,6 +816,7 @@ out:
                rs_sta->last_txrate_idx = idx;
                info->control.rates[0].idx = rs_sta->last_txrate_idx;
        }
+       info->control.rates[0].count = 1;
 
        D_RATE("leave: %d\n", idx);
 }
index 1fc0b227e120d5d74054be24356f07ef2852824f..ed3c42a63a4369678f2764049f0b7e257f535fb4 100644 (file)
@@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
                info->control.rates[0].flags = 0;
        }
        info->control.rates[0].idx = rate_idx;
-
+       info->control.rates[0].count = 1;
 }
 
 static void *
index 907bd6e50aadce652145a32d496223b0a365e92b..10fbb176cc8e1d8ca259a02dfaf9c83b0bea2514 100644 (file)
@@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
                info->control.rates[0].flags = 0;
        }
        info->control.rates[0].idx = rate_idx;
-
+       info->control.rates[0].count = 1;
 }
 
 static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
index 707446fa00bdbaa969bdd6f60f3e6dab3d8db3bd..cd1ad00191857e962f74a45c0bbcfe56e1591514 100644 (file)
@@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
        struct iwl_chain_noise_data *data = &priv->chain_noise_data;
        int ret;
 
-       if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED))
+       if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
                return;
 
        if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
index 39aad9893e0b1d6df218655959b77e292721babe..40fed1f511e23e546d64ec1d7c69b3c9f196a16a 100644 (file)
@@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
         */
        if (load_module) {
                err = request_module("%s", op->name);
+#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
                if (err)
                        IWL_ERR(drv,
                                "failed to load module %s (error %d), is dynamic loading enabled?\n",
                                op->name, err);
+#endif
        }
        return;
 
index 55334d542e263d1f4784e40d37ac9d48618e2eac..b99fe3163866169c1bfc0c2091a0cae062205354 100644 (file)
@@ -2546,6 +2546,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
                info->control.rates[0].flags = 0;
        }
        info->control.rates[0].idx = rate_idx;
+       info->control.rates[0].count = 1;
 }
 
 static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
index f212f16502ff43c3b446d3f99e136fbf5c4b795a..48c1891e3df6b00ce3417b7609f9c240ae36f2e4 100644 (file)
@@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
                tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
                return;
        } else if (ieee80211_is_back_req(fc)) {
-               tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+               tx_cmd->tx_flags |=
+                       cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
        }
 
        /* HT rate doesn't make sense for a non data frame */
index b52d70c75e1ab7dd3bc7d5a6a699281c6928ec62..72f32e5caa4d1931b8719891c150f1b30304425e 100644 (file)
@@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
         * TODO: we do not use +6 dBm option to do not increase power beyond
         * regulatory limit, however this could be utilized for devices with
         * CAPABILITY_POWER_LIMIT.
+        *
+        * TODO: add different temperature compensation code for RT3290 & RT5390
+        * to allow use of BBP_R1 on those chips.
         */
-       rt2800_bbp_read(rt2x00dev, 1, &r1);
-       if (delta <= -12) {
-               power_ctrl = 2;
-               delta += 12;
-       } else if (delta <= -6) {
-               power_ctrl = 1;
-               delta += 6;
-       } else {
-               power_ctrl = 0;
+       if (!rt2x00_rt(rt2x00dev, RT3290) &&
+           !rt2x00_rt(rt2x00dev, RT5390)) {
+               rt2800_bbp_read(rt2x00dev, 1, &r1);
+               if (delta <= -12) {
+                       power_ctrl = 2;
+                       delta += 12;
+               } else if (delta <= -6) {
+                       power_ctrl = 1;
+                       delta += 6;
+               } else {
+                       power_ctrl = 0;
+               }
+               rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
+               rt2800_bbp_write(rt2x00dev, 1, r1);
        }
-       rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
-       rt2800_bbp_write(rt2x00dev, 1, r1);
+
        offset = TX_PWR_CFG_0;
 
        for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
index 9544cdc0d1af120098466ca63d814acfa28957ef..e79e006eb9abc4eab4722aa483d2c6fb38a8c455 100644 (file)
@@ -811,6 +811,70 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
        return pcidev->irq;
 }
 
+static struct iosapic_info *first_isi = NULL;
+
+#ifdef CONFIG_64BIT
+int iosapic_serial_irq(int num)
+{
+       struct iosapic_info *isi = first_isi;
+       struct irt_entry *irte = NULL;  /* only used if PAT PDC */
+       struct vector_info *vi;
+       int isi_line;   /* line used by device */
+
+       /* lookup IRT entry for isi/slot/pin set */
+       irte = &irt_cell[num];
+
+       DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
+               irte,
+               irte->entry_type,
+               irte->entry_length,
+               irte->polarity_trigger,
+               irte->src_bus_irq_devno,
+               irte->src_bus_id,
+               irte->src_seg_id,
+               irte->dest_iosapic_intin,
+               (u32) irte->dest_iosapic_addr);
+       isi_line = irte->dest_iosapic_intin;
+
+       /* get vector info for this input line */
+       vi = isi->isi_vector + isi_line;
+       DBG_IRT("iosapic_serial_irq:  line %d vi 0x%p\n", isi_line, vi);
+
+       /* If this IRQ line has already been setup, skip it */
+       if (vi->irte)
+               goto out;
+
+       vi->irte = irte;
+
+       /*
+        * Allocate processor IRQ
+        *
+        * XXX/FIXME The txn_alloc_irq() code and related code should be
+        * moved to enable_irq(). That way we only allocate processor IRQ
+        * bits for devices that actually have drivers claiming them.
+        * Right now we assign an IRQ to every PCI device present,
+        * regardless of whether it's used or not.
+        */
+       vi->txn_irq = txn_alloc_irq(8);
+
+       if (vi->txn_irq < 0)
+               panic("I/O sapic: couldn't get TXN IRQ\n");
+
+       /* enable_irq() will use txn_* to program IRdT */
+       vi->txn_addr = txn_alloc_addr(vi->txn_irq);
+       vi->txn_data = txn_alloc_data(vi->txn_irq);
+
+       vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI;
+       vi->eoi_data = cpu_to_le32(vi->txn_data);
+
+       cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi);
+
+ out:
+
+       return vi->txn_irq;
+}
+#endif
+
 
 /*
 ** squirrel away the I/O Sapic Version
@@ -877,6 +941,8 @@ void *iosapic_register(unsigned long hpa)
                vip->irqline = (unsigned char) cnt;
                vip->iosapic = isi;
        }
+       if (!first_isi)
+               first_isi = isi;
        return isi;
 }
 
index 716aa93fff76437ab0038de385d7c4fa7ec04834..59df8575a48ce834fb48ea002689ed97ed9ca51a 100644 (file)
@@ -61,6 +61,7 @@ static DEFINE_MUTEX(bridge_mutex);
 static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
 static void acpiphp_set_hpp_values(struct pci_bus *bus);
+static void hotplug_event_func(acpi_handle handle, u32 type, void *context);
 static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
 static void free_bridge(struct kref *kref);
 
@@ -147,7 +148,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
 
 
 static const struct acpi_dock_ops acpiphp_dock_ops = {
-       .handler = handle_hotplug_event_func,
+       .handler = hotplug_event_func,
 };
 
 /* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -179,6 +180,20 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
        return true;
 }
 
+static void acpiphp_dock_init(void *data)
+{
+       struct acpiphp_func *func = data;
+
+       get_bridge(func->slot->bridge);
+}
+
+static void acpiphp_dock_release(void *data)
+{
+       struct acpiphp_func *func = data;
+
+       put_bridge(func->slot->bridge);
+}
+
 /* callback routine to register each ACPI PCI slot object */
 static acpi_status
 register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -298,7 +313,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
                 */
                newfunc->flags &= ~FUNC_HAS_EJ0;
                if (register_hotplug_dock_device(handle,
-                       &acpiphp_dock_ops, newfunc))
+                       &acpiphp_dock_ops, newfunc,
+                       acpiphp_dock_init, acpiphp_dock_release))
                        dbg("failed to register dock device\n");
 
                /* we need to be notified when dock events happen
@@ -670,6 +686,7 @@ static int __ref enable_device(struct acpiphp_slot *slot)
        struct pci_bus *bus = slot->bridge->pci_bus;
        struct acpiphp_func *func;
        int num, max, pass;
+       LIST_HEAD(add_list);
 
        if (slot->flags & SLOT_ENABLED)
                goto err_exit;
@@ -694,13 +711,15 @@ static int __ref enable_device(struct acpiphp_slot *slot)
                                max = pci_scan_bridge(bus, dev, max, pass);
                                if (pass && dev->subordinate) {
                                        check_hotplug_bridge(slot, dev);
-                                       pci_bus_size_bridges(dev->subordinate);
+                                       pcibios_resource_survey_bus(dev->subordinate);
+                                       __pci_bus_size_bridges(dev->subordinate,
+                                                              &add_list);
                                }
                        }
                }
        }
 
-       pci_bus_assign_resources(bus);
+       __pci_bus_assign_resources(bus, &add_list, NULL);
        acpiphp_sanitize_bus(bus);
        acpiphp_set_hpp_values(bus);
        acpiphp_set_acpi_region(slot);
@@ -1065,22 +1084,12 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
        alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
 }
 
-static void _handle_hotplug_event_func(struct work_struct *work)
+static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
 {
-       struct acpiphp_func *func;
+       struct acpiphp_func *func = context;
        char objname[64];
        struct acpi_buffer buffer = { .length = sizeof(objname),
                                      .pointer = objname };
-       struct acpi_hp_work *hp_work;
-       acpi_handle handle;
-       u32 type;
-
-       hp_work = container_of(work, struct acpi_hp_work, work);
-       handle = hp_work->handle;
-       type = hp_work->type;
-       func = (struct acpiphp_func *)hp_work->context;
-
-       acpi_scan_lock_acquire();
 
        acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
@@ -1113,6 +1122,18 @@ static void _handle_hotplug_event_func(struct work_struct *work)
                warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
                break;
        }
+}
+
+static void _handle_hotplug_event_func(struct work_struct *work)
+{
+       struct acpi_hp_work *hp_work;
+       struct acpiphp_func *func;
+
+       hp_work = container_of(work, struct acpi_hp_work, work);
+       func = hp_work->context;
+       acpi_scan_lock_acquire();
+
+       hotplug_event_func(hp_work->handle, hp_work->type, func);
 
        acpi_scan_lock_release();
        kfree(hp_work); /* allocated in handle_hotplug_event_func */
index 68678ed76b0da93114280ae2ec878457159f63c5..d1182c4a754e235b25fb2914d658010d05157756 100644 (file)
@@ -202,6 +202,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                    struct resource *res, unsigned int reg);
 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
 void pci_configure_ari(struct pci_dev *dev);
+void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+                       struct list_head *realloc_head);
+void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+                                     struct list_head *realloc_head,
+                                     struct list_head *fail_head);
 
 /**
  * pci_ari_enabled - query ARI forwarding status
index 16abaaa1f83caed1009fb04018ece089b0138f26..d254e237953382bb99768c7cca79b65c3b9aded8 100644 (file)
@@ -1044,7 +1044,7 @@ handle_done:
        ;
 }
 
-static void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+void __ref __pci_bus_size_bridges(struct pci_bus *bus,
                        struct list_head *realloc_head)
 {
        struct pci_dev *dev;
@@ -1115,9 +1115,9 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pci_bus_size_bridges);
 
-static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
-                                        struct list_head *realloc_head,
-                                        struct list_head *fail_head)
+void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+                                     struct list_head *realloc_head,
+                                     struct list_head *fail_head)
 {
        struct pci_bus *b;
        struct pci_dev *dev;
index d8fa37d5c7342bee03a7e6bce2794a8669342f9e..2c9155b66f09f914789c4afde4a8fd630ddd6eba 100644 (file)
@@ -439,7 +439,7 @@ static int tps6586x_regulator_remove(struct platform_device *pdev)
 
 static struct platform_driver tps6586x_regulator_driver = {
        .driver = {
-               .name   = "tps6586x-pmic",
+               .name   = "tps6586x-regulator",
                .owner  = THIS_MODULE,
        },
        .probe          = tps6586x_regulator_probe,
index 439c012be763646c48be2910d67a3acce50c3d48..b63d534192e33a8defea70b6c0195293392ea98e 100644 (file)
@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
                file->f_pos += offset;
                break;
        case 2:
-               file->f_pos = debug->buffer_len - offset;
+               file->f_pos = debug->buffer_len + offset;
                break;
        default:
                return -EINVAL;
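This lseek fix (and the matching ones in the fnic and lpfc debugfs code further below) corrects the SEEK_END case: with whence == 2 the new position is the buffer length plus the offset (the offset is normally zero or negative), not the length minus it. A minimal sketch of the convention for a fixed-size in-memory buffer, assuming a hypothetical buf_len rather than the driver structures:

#include <linux/fs.h>

static loff_t buf_lseek(struct file *file, loff_t offset, int whence,
			loff_t buf_len)
{
	loff_t pos;

	switch (whence) {
	case SEEK_SET:		/* 0: absolute position */
		pos = offset;
		break;
	case SEEK_CUR:		/* 1: relative to the current position */
		pos = file->f_pos + offset;
		break;
	case SEEK_END:		/* 2: relative to the end; offset usually <= 0 */
		pos = buf_len + offset;
		break;
	default:
		return -EINVAL;
	}
	if (pos < 0 || pos > buf_len)
		return -EINVAL;
	return file->f_pos = pos;
}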
index 292b24f9bf935f119ab12dbbca96d5a00d16783d..32ae6c67ea3ae39980e58c7d8d0059af1bf4640c 100644 (file)
@@ -1656,9 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 
        if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
            fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
-               skb->vlan_tci = VLAN_TAG_PRESENT |
-                               vlan_dev_vlan_id(fcoe->netdev);
+               /* must set skb->dev before calling vlan_put_tag */
                skb->dev = fcoe->realdev;
+               skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                            vlan_dev_vlan_id(fcoe->netdev));
+               if (!skb)
+                       return -ENOMEM;
        } else
                skb->dev = fcoe->netdev;
 
index cd743c545ce9a9031970a222f2087d6a15d4d589..795843dde8ecdf422718ab3cb2e54ceb18c43dff 100644 (file)
@@ -1548,9 +1548,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
 {
        struct fcoe_fcf *fcf;
        struct fcoe_fcf *best = fip->sel_fcf;
-       struct fcoe_fcf *first;
-
-       first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list);
 
        list_for_each_entry(fcf, &fip->fcfs, list) {
                LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
@@ -1568,17 +1565,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
                                        "" : "un");
                        continue;
                }
-               if (fcf->fabric_name != first->fabric_name ||
-                   fcf->vfid != first->vfid ||
-                   fcf->fc_map != first->fc_map) {
+               if (!best || fcf->pri < best->pri || best->flogi_sent)
+                       best = fcf;
+               if (fcf->fabric_name != best->fabric_name ||
+                   fcf->vfid != best->vfid ||
+                   fcf->fc_map != best->fc_map) {
                        LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
                                        "or FC-MAP\n");
                        return NULL;
                }
-               if (fcf->flogi_sent)
-                       continue;
-               if (!best || fcf->pri < best->pri || best->flogi_sent)
-                       best = fcf;
        }
        fip->sel_fcf = best;
        if (best) {
index adc1f7f471f554dd3bc48825ab45bd65df6ed3af..85e1ffd0e5c5af17e53305ad016f28898d65c8f7 100644 (file)
@@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file,
                pos = file->f_pos + offset;
                break;
        case 2:
-               pos = fnic_dbg_prt->buffer_len - offset;
+               pos = fnic_dbg_prt->buffer_len + offset;
        }
        return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
                          -EINVAL : (file->f_pos = pos);
index 82a3c1ec8706600473c4daedd33b248d2c94518f..6c4cedb44c075fba2ba81999e187e4587901b5f0 100644 (file)
@@ -8980,19 +8980,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
        if (!ioa_cfg->res_entries)
                goto out;
 
-       if (ioa_cfg->sis64) {
-               ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
-                                             BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
-               ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
-                                            BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
-               ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
-                                           BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
-
-               if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
-                       || !ioa_cfg->vset_ids)
-                       goto out_free_res_entries;
-       }
-
        for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
                list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
                ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
@@ -9089,9 +9076,6 @@ out_free_vpd_cbs:
                            ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
 out_free_res_entries:
        kfree(ioa_cfg->res_entries);
-       kfree(ioa_cfg->target_ids);
-       kfree(ioa_cfg->array_ids);
-       kfree(ioa_cfg->vset_ids);
        goto out;
 }
 
index a1fb840596ef08cb129c8301dd81a34d24c73fd5..07a85ce4178287417648847c360034a89b431415 100644 (file)
@@ -1440,9 +1440,9 @@ struct ipr_ioa_cfg {
        /*
         * Bitmaps for SIS64 generated target values
         */
-       unsigned long *target_ids;
-       unsigned long *array_ids;
-       unsigned long *vset_ids;
+       unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+       unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+       unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
 
        u16 type; /* CCIN of the card */
 
index c772d8d271594e0b2365745645d7ba7fee78c16b..8b928c67e4b9a14ca6dd94421caa95d1275995d1 100644 (file)
@@ -463,13 +463,7 @@ static void fc_exch_delete(struct fc_exch *ep)
        fc_exch_release(ep);    /* drop hold for exch in mp */
 }
 
-/**
- * fc_seq_send() - Send a frame using existing sequence/exchange pair
- * @lport: The local port that the exchange will be sent on
- * @sp:           The sequence to be sent
- * @fp:           The frame to be sent on the exchange
- */
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
+static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
                       struct fc_frame *fp)
 {
        struct fc_exch *ep;
@@ -479,7 +473,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
        u8 fh_type = fh->fh_type;
 
        ep = fc_seq_exch(sp);
-       WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
+       WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
 
        f_ctl = ntoh24(fh->fh_f_ctl);
        fc_exch_setup_hdr(ep, fp, f_ctl);
@@ -502,17 +496,34 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
        error = lport->tt.frame_send(lport, fp);
 
        if (fh_type == FC_TYPE_BLS)
-               return error;
+               goto out;
 
        /*
         * Update the exchange and sequence flags,
         * assuming all frames for the sequence have been sent.
         * We can only be called to send once for each sequence.
         */
-       spin_lock_bh(&ep->ex_lock);
        ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;   /* not first seq */
        if (f_ctl & FC_FC_SEQ_INIT)
                ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+out:
+       return error;
+}
+
+/**
+ * fc_seq_send() - Send a frame using existing sequence/exchange pair
+ * @lport: The local port that the exchange will be sent on
+ * @sp:           The sequence to be sent
+ * @fp:           The frame to be sent on the exchange
+ */
+static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
+                      struct fc_frame *fp)
+{
+       struct fc_exch *ep;
+       int error;
+       ep = fc_seq_exch(sp);
+       spin_lock_bh(&ep->ex_lock);
+       error = fc_seq_send_locked(lport, sp, fp);
        spin_unlock_bh(&ep->ex_lock);
        return error;
 }
@@ -629,7 +640,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
        if (fp) {
                fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
                               FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-               error = fc_seq_send(ep->lp, sp, fp);
+               error = fc_seq_send_locked(ep->lp, sp, fp);
        } else
                error = -ENOBUFS;
        return error;
@@ -1132,7 +1143,7 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
        f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
        f_ctl |= ep->f_ctl;
        fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
-       fc_seq_send(ep->lp, sp, fp);
+       fc_seq_send_locked(ep->lp, sp, fp);
 }
 
 /**
@@ -1307,8 +1318,8 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
                ap->ba_low_seq_cnt = htons(sp->cnt);
        }
        sp = fc_seq_start_next_locked(sp);
-       spin_unlock_bh(&ep->ex_lock);
        fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+       spin_unlock_bh(&ep->ex_lock);
        fc_frame_free(rx_fp);
        return;
 
index d518d17e940fc16691e48e9b1f662659716a1145..6bbb9447b75d4895c03a38af0e7066e9b32338ca 100644 (file)
@@ -1962,7 +1962,7 @@ static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
                rdata->flags |= FC_RP_FLAGS_RETRY;
        rdata->supported_classes = FC_COS_CLASS3;
 
-       if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
+       if (!(lport->service_params & FCP_SPPF_INIT_FCN))
                return 0;
 
        spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
index f63f5ff7f27467f7b34f80d3bc0089538dc147df..f525ecb7a9c6e187bf1097a6baf7aabad96377a4 100644 (file)
@@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
                pos = file->f_pos + off;
                break;
        case 2:
-               pos = debug->len - off;
+               pos = debug->len + off;
        }
        return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
 }
index 98ab921070d2a80b67f5f3a4c80c93a97fc870a3..0a5c8951cebb4ef0e345bdf4b5a8fc1cd35e2ed6 100644 (file)
@@ -278,3 +278,14 @@ qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
 
        set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
 }
+
+static inline void
+qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
+{
+       if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+           (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+               set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+               clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+               complete(&ha->mbx_intr_comp);
+       }
+}
index 259d9205d876a7691477a423235543c191cd669a..d2a4c75e5b8fbc7f086877767e84c88bf49ea704 100644 (file)
@@ -104,14 +104,9 @@ qla2100_intr_handler(int irq, void *dev_id)
                        RD_REG_WORD(&reg->hccr);
                }
        }
+       qla2x00_handle_mbx_completion(ha, status);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-           (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-               set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-               complete(&ha->mbx_intr_comp);
-       }
-
        return (IRQ_HANDLED);
 }
 
@@ -221,14 +216,9 @@ qla2300_intr_handler(int irq, void *dev_id)
                WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
                RD_REG_WORD_RELAXED(&reg->hccr);
        }
+       qla2x00_handle_mbx_completion(ha, status);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-           (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-               set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-               complete(&ha->mbx_intr_comp);
-       }
-
        return (IRQ_HANDLED);
 }
 
@@ -2613,14 +2603,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
                if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
                        ndelay(3500);
        }
+       qla2x00_handle_mbx_completion(ha, status);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-           (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-               set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-               complete(&ha->mbx_intr_comp);
-       }
-
        return IRQ_HANDLED;
 }
 
@@ -2763,13 +2748,9 @@ qla24xx_msix_default(int irq, void *dev_id)
                }
                WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        } while (0);
+       qla2x00_handle_mbx_completion(ha, status);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-           (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-               set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-               complete(&ha->mbx_intr_comp);
-       }
        return IRQ_HANDLED;
 }
 
index 9e5d89db7272d0e8795a6addef96b56fdcc33e8a..3587ec267fa6468c8cd51e86082f09e603c80477 100644 (file)
@@ -179,8 +179,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 
                wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
 
-               clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
-
        } else {
                ql_dbg(ql_dbg_mbx, vha, 0x1011,
                    "Cmd=%x Polling Mode.\n", command);
index 937fed8cb0388550b619dfae667d452fa1c686a7..a6df55838365e2ebac0c15abe52cecb462518f48 100644 (file)
@@ -148,9 +148,6 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
                wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
-
-               clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
-
        } else {
                ql_dbg(ql_dbg_mbx, vha, 0x112c,
                    "Cmd=%x Polling Mode.\n", command);
@@ -2934,13 +2931,10 @@ qlafx00_intr_handler(int irq, void *dev_id)
                QLAFX00_CLR_INTR_REG(ha, clr_intr);
                QLAFX00_RD_INTR_REG(ha);
        }
+
+       qla2x00_handle_mbx_completion(ha, status);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-           (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-               set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-               complete(&ha->mbx_intr_comp);
-       }
        return IRQ_HANDLED;
 }
 
index 10754f5183037dce6d41c520557b4486cfae6288..cce0cd0d7ec43da7359679265b201a0bc504d5d8 100644 (file)
@@ -2074,9 +2074,6 @@ qla82xx_intr_handler(int irq, void *dev_id)
                }
                WRT_REG_DWORD(&reg->host_int, 0);
        }
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       if (!ha->flags.msi_enabled)
-               qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
 
 #ifdef QL_DEBUG_LEVEL_17
        if (!irq && ha->flags.eeh_busy)
@@ -2085,11 +2082,12 @@ qla82xx_intr_handler(int irq, void *dev_id)
                    status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
 #endif
 
-       if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-           (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-               set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-               complete(&ha->mbx_intr_comp);
-       }
+       qla2x00_handle_mbx_completion(ha, status);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       if (!ha->flags.msi_enabled)
+               qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
        return IRQ_HANDLED;
 }
 
@@ -2149,8 +2147,6 @@ qla82xx_msix_default(int irq, void *dev_id)
                WRT_REG_DWORD(&reg->host_int, 0);
        } while (0);
 
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
 #ifdef QL_DEBUG_LEVEL_17
        if (!irq && ha->flags.eeh_busy)
                ql_log(ql_log_warn, vha, 0x5044,
@@ -2158,11 +2154,9 @@ qla82xx_msix_default(int irq, void *dev_id)
                    status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
 #endif
 
-       if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-               (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-                       set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-                       complete(&ha->mbx_intr_comp);
-       }
+       qla2x00_handle_mbx_completion(ha, status);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        return IRQ_HANDLED;
 }
 
@@ -3345,7 +3339,7 @@ void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
                ha->flags.mbox_busy = 0;
                ql_log(ql_log_warn, vha, 0x6010,
                    "Doing premature completion of mbx command.\n");
-               if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+               if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
                        complete(&ha->mbx_intr_comp);
        }
 }
index 7a3870f385f63adcc7ed23db355af7fe0682fb93..66b0b26a1381e4ecb17806c935ca05c1b62cfe70 100644 (file)
@@ -688,8 +688,12 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
                 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
                 * for qla_tgt_xmit_response LLD code
                 */
+               if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+                       se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
+                       se_cmd->residual_count = 0;
+               }
                se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
-               se_cmd->residual_count = se_cmd->data_length;
+               se_cmd->residual_count += se_cmd->data_length;
 
                cmd->bufflen = 0;
        }
index c735c5a008a2a180669cb1ece71fa8ccfb112cdf..6427600b5bbe66c5d66fbcf967a1b20a008162da 100644 (file)
@@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
                int ret;
 
                sg_free_table(sgt);
-               ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+               ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
                if (ret)
                        return ret;
        }
index f5d84d6f8222c4afc261f738daa695da332a46df..48b396fced0acdde9fe6f28cb518a5b3024247c0 100644 (file)
@@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
            acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
                return NULL;
 
-       pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL);
+       pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata) {
                dev_err(&pdev->dev,
                        "failed to allocate memory for platform data\n");
index 5000586cb98da2331b0cbe1c31ec26b0f6f40eb4..71cc3e6ef47ca770dd73c4d55f8153d42a8471aa 100644 (file)
@@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
        }
 
        ret = pm_runtime_get_sync(&sdd->pdev->dev);
-       if (ret != 0) {
+       if (ret < 0) {
                dev_err(dev, "Failed to enable device: %d\n", ret);
                goto out_tx;
        }
index 2e4a28b018e8d894d92975ead5e14dfe01dabec4..12f321dd23993ea8a1d35c54e6d14b733470484c 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_DM365_VPFE
        tristate "DM365 VPFE Media Controller Capture Driver"
-       depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE
+       depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF
        select VIDEOBUF2_DMA_CONTIG
        help
          Support for DM365 VPFE based Media Controller Capture driver.
index b88e1ddce229755155a25da66af544218642321d..d8ce20d2fbda54f35fe5a12031cf391230719f69 100644 (file)
@@ -639,7 +639,8 @@ static int vpfe_probe(struct platform_device *pdev)
        if (ret)
                goto probe_free_dev_mem;
 
-       if (vpfe_initialize_modules(vpfe_dev, pdev))
+       ret = vpfe_initialize_modules(vpfe_dev, pdev);
+       if (ret)
                goto probe_disable_clock;
 
        vpfe_dev->media_dev.dev = vpfe_dev->pdev;
@@ -663,7 +664,8 @@ static int vpfe_probe(struct platform_device *pdev)
        /* set the driver data in platform device */
        platform_set_drvdata(pdev, vpfe_dev);
        /* register subdevs/entities */
-       if (vpfe_register_entities(vpfe_dev))
+       ret = vpfe_register_entities(vpfe_dev);
+       if (ret)
                goto probe_out_v4l2_unregister;
 
        ret = vpfe_attach_irq(vpfe_dev);
index df6569b997b88269bfdb48fd17dc410a0da50c0a..34f3b6d02d2a4075b423d1adcca0c62b5f949c96 100644 (file)
@@ -5,6 +5,7 @@ config SOLO6X10
        select VIDEOBUF2_DMA_SG
        select VIDEOBUF2_DMA_CONTIG
        select SND_PCM
+       select FONT_8x16
        ---help---
          This driver supports the Softlogic based MPEG-4 and h.264 codec
          cards.
index 39de5e021ccbc8646ec01c62bb952f66691e69b0..73fc3cc19e332ccd8d800fc6d4744f07c016cb30 100644 (file)
@@ -873,7 +873,7 @@ static int qt_open(struct tty_struct *tty,
        result = qt_get_device(serial, &port0->device_data);
 
        /* Port specific setups */
-       result = qt_open_channel(serial, port->number, &channel_data);
+       result = qt_open_channel(serial, port->port_number, &channel_data);
        if (result < 0) {
                dev_dbg(&port->dev, "qt_open_channel failed\n");
                return result;
@@ -888,7 +888,7 @@ static int qt_open(struct tty_struct *tty,
            (SERIAL_MSR_CTS | SERIAL_MSR_DSR | SERIAL_MSR_RI | SERIAL_MSR_CD);
 
        /* Set Baud rate to default and turn off (default)flow control here */
-       result = qt_setuart(serial, port->number, DEFAULT_DIVISOR, DEFAULT_LCR);
+       result = qt_setuart(serial, port->port_number, DEFAULT_DIVISOR, DEFAULT_LCR);
        if (result < 0) {
                dev_dbg(&port->dev, "qt_setuart failed\n");
                return result;
@@ -906,8 +906,7 @@ static int qt_open(struct tty_struct *tty,
                        qt_submit_urb_from_open(serial, port);
        }
 
-       dev_dbg(&port->dev, "port number is %d\n", port->number);
-       dev_dbg(&port->dev, "serial number is %d\n", port->serial->minor);
+       dev_dbg(&port->dev, "minor number is %d\n", port->minor);
        dev_dbg(&port->dev,
                "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
        dev_dbg(&port->dev,
@@ -1003,7 +1002,7 @@ static void qt_close(struct usb_serial_port *port)
        status = 0;
 
        tty = tty_port_tty_get(&port->port);
-       index = tty->index - serial->minor;
+       index = port->port_number;
 
        qt_port = qt_get_port_private(port);
        port0 = qt_get_port_private(serial->port[0]);
@@ -1022,14 +1021,11 @@ static void qt_close(struct usb_serial_port *port)
        /* Close uart channel */
        status = qt_close_channel(serial, index);
        if (status < 0)
-               dev_dbg(&port->dev,
-                       "%s - port %d qt_close_channel failed.\n",
-                       __func__, port->number);
+               dev_dbg(&port->dev, "%s - qt_close_channel failed.\n", __func__);
 
        port0->open_ports--;
 
-       dev_dbg(&port->dev, "qt_num_open_ports in close%d:in port%d\n",
-               port0->open_ports, port->number);
+       dev_dbg(&port->dev, "qt_num_open_ports in close%d\n", port0->open_ports);
 
        if (port0->open_ports == 0) {
                if (serial->port[0]->interrupt_in_urb) {
@@ -1133,12 +1129,11 @@ static int qt_ioctl(struct tty_struct *tty,
 {
        struct usb_serial_port *port = tty->driver_data;
        struct quatech_port *qt_port = qt_get_port_private(port);
-       struct usb_serial *serial = get_usb_serial(port, __func__);
        unsigned int index;
 
        dev_dbg(&port->dev, "%s cmd 0x%04x\n", __func__, cmd);
 
-       index = tty->index - serial->minor;
+       index = port->port_number;
 
        if (cmd == TIOCMIWAIT) {
                while (qt_port != NULL) {
@@ -1169,8 +1164,7 @@ static int qt_ioctl(struct tty_struct *tty,
                return 0;
        }
 
-       dev_dbg(&port->dev, "%s -No ioctl for that one.  port = %d\n",
-               __func__, port->number);
+       dev_dbg(&port->dev, "%s -No ioctl for that one.\n", __func__);
        return -ENOIOCTLCMD;
 }
 
@@ -1185,7 +1179,7 @@ static void qt_set_termios(struct tty_struct *tty,
        int baud, divisor, remainder;
        int status;
 
-       index = tty->index - port->serial->minor;
+       index = port->port_number;
 
        switch (cflag & CSIZE) {
        case CS5:
@@ -1245,8 +1239,7 @@ static void qt_set_termios(struct tty_struct *tty,
 
        /* Now determine flow control */
        if (cflag & CRTSCTS) {
-               dev_dbg(&port->dev, "%s - Enabling HW flow control port %d\n",
-                       __func__, port->number);
+               dev_dbg(&port->dev, "%s - Enabling HW flow control\n", __func__);
 
                /* Enable RTS/CTS flow control */
                status = box_set_hw_flow_ctrl(port->serial, index, 1);
@@ -1258,8 +1251,7 @@ static void qt_set_termios(struct tty_struct *tty,
        } else {
                /* Disable RTS/CTS flow control */
                dev_dbg(&port->dev,
-                       "%s - disabling HW flow control port %d\n",
-                       __func__, port->number);
+                       "%s - disabling HW flow control\n", __func__);
 
                status = box_set_hw_flow_ctrl(port->serial, index, 0);
                if (status < 0) {
@@ -1303,7 +1295,7 @@ static void qt_break(struct tty_struct *tty, int break_state)
        u16 index, onoff;
        unsigned int result;
 
-       index = tty->index - serial->minor;
+       index = port->port_number;
 
        qt_port = qt_get_port_private(port);
 
@@ -1332,7 +1324,7 @@ static inline int qt_real_tiocmget(struct tty_struct *tty,
        int status;
        unsigned int index;
 
-       index = tty->index - serial->minor;
+       index = port->port_number;
        status =
            box_get_register(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
        if (status >= 0) {
@@ -1371,7 +1363,7 @@ static inline int qt_real_tiocmset(struct tty_struct *tty,
        int status;
        unsigned int index;
 
-       index = tty->index - serial->minor;
+       index = port->port_number;
        status =
            box_get_register(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
        if (status < 0)
index 13e9e715ad2e738a581d7db6f235d298b9b886fb..8d8b3ff68490be34fd78382f256bdd5dedda87f2 100644 (file)
@@ -155,7 +155,7 @@ static ssize_t lio_target_np_store_iser(
        struct iscsi_tpg_np *tpg_np_iser = NULL;
        char *endptr;
        u32 op;
-       int rc;
+       int rc = 0;
 
        op = simple_strtoul(page, &endptr, 0);
        if ((op != 1) && (op != 0)) {
@@ -174,31 +174,32 @@ static ssize_t lio_target_np_store_iser(
                return -EINVAL;
 
        if (op) {
-               int rc = request_module("ib_isert");
-               if (rc != 0)
+               rc = request_module("ib_isert");
+               if (rc != 0) {
                        pr_warn("Unable to request_module for ib_isert\n");
+                       rc = 0;
+               }
 
                tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
                                np->np_ip, tpg_np, ISCSI_INFINIBAND);
-               if (!tpg_np_iser || IS_ERR(tpg_np_iser))
+               if (IS_ERR(tpg_np_iser)) {
+                       rc = PTR_ERR(tpg_np_iser);
                        goto out;
+               }
        } else {
                tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
-               if (!tpg_np_iser)
-                       goto out;
-
-               rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
-               if (rc < 0)
-                       goto out;
+               if (tpg_np_iser) {
+                       rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
+                       if (rc < 0)
+                               goto out;
+               }
        }
 
-       printk("lio_target_np_store_iser() done, op: %d\n", op);
-
        iscsit_put_tpg(tpg);
        return count;
 out:
        iscsit_put_tpg(tpg);
-       return -EINVAL;
+       return rc;
 }
 
 TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
index 8e6298cc8839711e03745c08f7be04446ac47f3b..dcb199da06b9678d8dbd5fd1446dc0493edae003 100644 (file)
@@ -842,11 +842,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
                return 0;
 
        sess->time2retain_timer_flags |= ISCSI_TF_STOP;
-       spin_unlock_bh(&se_tpg->session_lock);
+       spin_unlock(&se_tpg->session_lock);
 
        del_timer_sync(&sess->time2retain_timer);
 
-       spin_lock_bh(&se_tpg->session_lock);
+       spin_lock(&se_tpg->session_lock);
        sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
        pr_debug("Stopped Time2Retain Timer for SID: %u\n",
                        sess->sid);
index bb5d5c5bce65107877b4e326c70e4d195ac4ad6b..3402241be87cd401b500da10a398cb734e62bf8b 100644 (file)
@@ -984,8 +984,6 @@ int iscsi_target_setup_login_socket(
        }
 
        np->np_transport = t;
-       printk("Set np->np_transport to %p -> %s\n", np->np_transport,
-                               np->np_transport->name);
        return 0;
 }
 
@@ -1002,7 +1000,6 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
 
        conn->sock = new_sock;
        conn->login_family = np->np_sockaddr.ss_family;
-       printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock);
 
        if (np->np_sockaddr.ss_family == AF_INET6) {
                memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
index 7ad912060e21364ab75b577c28f7cc159cc436c3..cd5018ff9cd78e2b8d547a559e5d0eebb8ff8283 100644 (file)
@@ -721,9 +721,6 @@ int iscsi_target_locate_portal(
 
                start += strlen(key) + strlen(value) + 2;
        }
-
-       printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf);
-
        /*
         * See 5.3.  Login Phase.
         */
index 6b78399bc7c97ddb436aa2982a303aa51836232b..58ad1c05b7f8f9deea45e038c0400c5fc2a2fd39 100644 (file)
@@ -1,5 +1,5 @@
 obj-$(CONFIG_TTY)              += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
-                                  tty_buffer.o tty_port.o tty_mutex.o
+                                  tty_buffer.o tty_port.o tty_mutex.o tty_ldsem.o
 obj-$(CONFIG_LEGACY_PTYS)      += pty.o
 obj-$(CONFIG_UNIX98_PTYS)      += pty.o
 obj-$(CONFIG_AUDIT)            += tty_audit.o
index b6f7d52f7c35ea0c8a74f6b14e4f7a1a09ec24d2..9d47f50c2755a37835ec04662a194db631dba08a 100644 (file)
@@ -1328,7 +1328,7 @@ out_error:
  */
 static int __init hvc_iucv_config(char *val)
 {
-        return strict_strtoul(val, 10, &hvc_iucv_devices);
+        return kstrtoul(val, 10, &hvc_iucv_devices);
 }
 
 
index 6c7fe90ad72d48d2834536331e6826ee2719f94f..4bf0fc0843d73bc61cd99d0cc5898177fe0dc39c 100644 (file)
@@ -89,6 +89,7 @@ struct n_tty_data {
        int read_head;
        int read_tail;
        int read_cnt;
+       int minimum_to_wake;
 
        unsigned char *echo_buf;
        unsigned int echo_pos;
@@ -114,22 +115,25 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
 }
 
 /**
- *     n_tty_set__room -       receive space
+ *     n_tty_set_room  -       receive space
  *     @tty: terminal
  *
- *     Called by the driver to find out how much data it is
- *     permitted to feed to the line discipline without any being lost
- *     and thus to manage flow control. Not serialized. Answers for the
- *     "instant".
+ *     Updates tty->receive_room to reflect the currently available space
+ *     in the input buffer, and re-schedules the flip buffer work if space
+ *     just became available.
+ *
+ *     Locks: Concurrent update is protected with read_lock
  */
 
-static void n_tty_set_room(struct tty_struct *tty)
+static int set_room(struct tty_struct *tty)
 {
        struct n_tty_data *ldata = tty->disc_data;
        int left;
        int old_left;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&ldata->read_lock, flags);
 
-       /* ldata->read_cnt is not read locked ? */
        if (I_PARMRK(tty)) {
                /* Multiply read_cnt by 3, since each byte might take up to
                 * three times as many spaces when PARMRK is set (depending on
@@ -149,8 +153,15 @@ static void n_tty_set_room(struct tty_struct *tty)
        old_left = tty->receive_room;
        tty->receive_room = left;
 
+       raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
+
+       return left && !old_left;
+}
+
+static void n_tty_set_room(struct tty_struct *tty)
+{
        /* Did this open up the receive buffer? We may need to flip */
-       if (left && !old_left) {
+       if (set_room(tty)) {
                WARN_RATELIMIT(tty->port->itty == NULL,
                                "scheduling with invalid itty\n");
                /* see if ldisc has been killed - if so, this means that
@@ -647,8 +658,7 @@ static void process_echoes(struct tty_struct *tty)
                        if (no_space_left)
                                break;
                } else {
-                       if (O_OPOST(tty) &&
-                           !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
+                       if (O_OPOST(tty)) {
                                int retval = do_output_char(c, tty, space);
                                if (retval < 0)
                                        break;
@@ -1454,9 +1464,9 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
                        tty->ops->flush_chars(tty);
        }
 
-       n_tty_set_room(tty);
+       set_room(tty);
 
-       if ((!ldata->icanon && (ldata->read_cnt >= tty->minimum_to_wake)) ||
+       if ((!ldata->icanon && (ldata->read_cnt >= ldata->minimum_to_wake)) ||
                L_EXTPROC(tty)) {
                kill_fasync(&tty->fasync, SIGIO, POLL_IN);
                if (waitqueue_active(&tty->read_wait))
@@ -1516,12 +1526,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
                wake_up_interruptible(&tty->read_wait);
 
        ldata->icanon = (L_ICANON(tty) != 0);
-       if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
-               ldata->raw = 1;
-               ldata->real_raw = 1;
-               n_tty_set_room(tty);
-               return;
-       }
+
        if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
            I_ICRNL(tty) || I_INLCR(tty) || L_ICANON(tty) ||
            I_IXON(tty) || L_ISIG(tty) || L_ECHO(tty) ||
@@ -1642,7 +1647,7 @@ static int n_tty_open(struct tty_struct *tty)
        tty->disc_data = ldata;
        reset_buffer_flags(tty->disc_data);
        ldata->column = 0;
-       tty->minimum_to_wake = 1;
+       ldata->minimum_to_wake = 1;
        tty->closing = 0;
        /* indicate buffer work may resume */
        clear_bit(TTY_LDISC_HALTED, &tty->flags);
@@ -1806,21 +1811,17 @@ do_it_again:
        minimum = time = 0;
        timeout = MAX_SCHEDULE_TIMEOUT;
        if (!ldata->icanon) {
-               time = (HZ / 10) * TIME_CHAR(tty);
                minimum = MIN_CHAR(tty);
                if (minimum) {
+                       time = (HZ / 10) * TIME_CHAR(tty);
                        if (time)
-                               tty->minimum_to_wake = 1;
+                               ldata->minimum_to_wake = 1;
                        else if (!waitqueue_active(&tty->read_wait) ||
-                                (tty->minimum_to_wake > minimum))
-                               tty->minimum_to_wake = minimum;
+                                (ldata->minimum_to_wake > minimum))
+                               ldata->minimum_to_wake = minimum;
                } else {
-                       timeout = 0;
-                       if (time) {
-                               timeout = time;
-                               time = 0;
-                       }
-                       tty->minimum_to_wake = minimum = 1;
+                       timeout = (HZ / 10) * TIME_CHAR(tty);
+                       ldata->minimum_to_wake = minimum = 1;
                }
        }
 
@@ -1860,9 +1861,9 @@ do_it_again:
                   TASK_RUNNING. */
                set_current_state(TASK_INTERRUPTIBLE);
 
-               if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
+               if (((minimum - (b - buf)) < ldata->minimum_to_wake) &&
                    ((minimum - (b - buf)) >= 1))
-                       tty->minimum_to_wake = (minimum - (b - buf));
+                       ldata->minimum_to_wake = (minimum - (b - buf));
 
                if (!input_available_p(tty, 0)) {
                        if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
@@ -1881,7 +1882,6 @@ do_it_again:
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       /* FIXME: does n_tty_set_room need locking ? */
                        n_tty_set_room(tty);
                        timeout = schedule_timeout(timeout);
                        continue;
@@ -1979,7 +1979,7 @@ do_it_again:
        remove_wait_queue(&tty->read_wait, &wait);
 
        if (!waitqueue_active(&tty->read_wait))
-               tty->minimum_to_wake = minimum;
+               ldata->minimum_to_wake = minimum;
 
        __set_current_state(TASK_RUNNING);
        size = b - buf;
@@ -2045,7 +2045,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
                        retval = -EIO;
                        break;
                }
-               if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
+               if (O_OPOST(tty)) {
                        while (nr > 0) {
                                ssize_t num = process_output_block(tty, b, nr);
                                if (num < 0) {
@@ -2111,6 +2111,7 @@ break_out:
 static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
                                                        poll_table *wait)
 {
+       struct n_tty_data *ldata = tty->disc_data;
        unsigned int mask = 0;
 
        poll_wait(file, &tty->read_wait, wait);
@@ -2125,9 +2126,9 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
                mask |= POLLHUP;
        if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
                if (MIN_CHAR(tty) && !TIME_CHAR(tty))
-                       tty->minimum_to_wake = MIN_CHAR(tty);
+                       ldata->minimum_to_wake = MIN_CHAR(tty);
                else
-                       tty->minimum_to_wake = 1;
+                       ldata->minimum_to_wake = 1;
        }
        if (tty->ops->write && !tty_is_writelocked(tty) &&
                        tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
@@ -2175,6 +2176,18 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
        }
 }
 
+static void n_tty_fasync(struct tty_struct *tty, int on)
+{
+       struct n_tty_data *ldata = tty->disc_data;
+
+       if (!waitqueue_active(&tty->read_wait)) {
+               if (on)
+                       ldata->minimum_to_wake = 1;
+               else if (!tty->fasync)
+                       ldata->minimum_to_wake = N_TTY_BUF_SIZE;
+       }
+}
+
 struct tty_ldisc_ops tty_ldisc_N_TTY = {
        .magic           = TTY_LDISC_MAGIC,
        .name            = "n_tty",
@@ -2188,7 +2201,8 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = {
        .set_termios     = n_tty_set_termios,
        .poll            = n_tty_poll,
        .receive_buf     = n_tty_receive_buf,
-       .write_wakeup    = n_tty_write_wakeup
+       .write_wakeup    = n_tty_write_wakeup,
+       .fasync          = n_tty_fasync,
 };
 
 /**
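The MIN/TIME rework above maps onto the classic termios non-canonical read semantics that minimum_to_wake now tracks per line discipline. A stand-alone user-space illustration (device path and values are placeholders, not taken from the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
            struct termios tio;
            char buf[64];
            ssize_t n;
            int fd = open("/dev/ttyS0", O_RDONLY | O_NOCTTY);

            if (fd < 0 || tcgetattr(fd, &tio) < 0)
                    return 1;
            tio.c_lflag &= ~(ICANON | ECHO);  /* non-canonical: MIN/TIME apply      */
            tio.c_cc[VMIN] = 8;               /* wake the reader after 8 bytes ...  */
            tio.c_cc[VTIME] = 10;             /* ... or 1 s of inter-byte silence   */
            if (tcsetattr(fd, TCSANOW, &tio) < 0)
                    return 1;
            n = read(fd, buf, sizeof(buf));
            printf("read %zd bytes\n", n);
            close(fd);
            return 0;
    }
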
index 59bfaecc4e1438ecc771573c44c161b26fc3548d..abfd9908978188ffe3bfd7a9c3aa3e8c6700c5d9 100644 (file)
@@ -244,14 +244,9 @@ static void pty_flush_buffer(struct tty_struct *tty)
 
 static int pty_open(struct tty_struct *tty, struct file *filp)
 {
-       int     retval = -ENODEV;
-
        if (!tty || !tty->link)
-               goto out;
-
-       set_bit(TTY_IO_ERROR, &tty->flags);
+               return -ENODEV;
 
-       retval = -EIO;
        if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
                goto out;
        if (test_bit(TTY_PTY_LOCK, &tty->link->flags))
@@ -262,9 +257,11 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
        clear_bit(TTY_IO_ERROR, &tty->flags);
        clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
        set_bit(TTY_THROTTLED, &tty->flags);
-       retval = 0;
+       return 0;
+
 out:
-       return retval;
+       set_bit(TTY_IO_ERROR, &tty->flags);
+       return -EIO;
 }
 
 static void pty_set_termios(struct tty_struct *tty,
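The -EIO path in pty_open() above is what a slave open hits while TTY_PTY_LOCK is still set; it clears only after unlockpt(). A self-contained user-space demonstration (illustrative only):

    #define _XOPEN_SOURCE 600
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            int master = posix_openpt(O_RDWR | O_NOCTTY);
            char *slave;

            if (master < 0)
                    return 1;
            slave = ptsname(master);
            /* Before unlockpt(), the slave open takes the -EIO path. */
            if (open(slave, O_RDWR | O_NOCTTY) < 0)
                    perror("open before unlockpt");
            grantpt(master);
            unlockpt(master);
            if (open(slave, O_RDWR | O_NOCTTY) >= 0)
                    printf("slave %s opened after unlockpt\n", slave);
            close(master);
            return 0;
    }
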
index 097dff9c08ad9e4ebec835f20800e6d118931bd0..bb91b4713ebdb8c7ba8849cba643d046c5bc9973 100644 (file)
@@ -30,6 +30,12 @@ static int __init serial_init_chip(struct parisc_device *dev)
        unsigned long address;
        int err;
 
+#ifdef CONFIG_64BIT
+       extern int iosapic_serial_irq(int cellnum);
+       if (!dev->irq && (dev->id.sversion == 0xad))
+               dev->irq = iosapic_serial_irq(dev->mod_index-1);
+#endif
+
        if (!dev->irq) {
                /* We find some unattached serial ports by walking native
                 * busses.  These should be silently ignored.  Otherwise,
@@ -51,7 +57,8 @@ static int __init serial_init_chip(struct parisc_device *dev)
        memset(&uart, 0, sizeof(uart));
        uart.port.iotype        = UPIO_MEM;
        /* 7.272727MHz on Lasi.  Assumed the same for Dino, Wax and Timi. */
-       uart.port.uartclk       = 7272727;
+       uart.port.uartclk       = (dev->id.sversion != 0xad) ?
+                                       7272727 : 1843200;
        uart.port.mapbase       = address;
        uart.port.membase       = ioremap_nocache(address, 16);
        uart.port.irq   = dev->irq;
@@ -73,6 +80,7 @@ static struct parisc_device_id serial_tbl[] = {
        { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 },
        { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c },
        { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d },
+       { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad },
        { 0 }
 };
 
index 26e3a97ab157ed16d23825c1a62bec4d53cd81b9..c52948b368d81a1dbd6ea274ffa3058f1128ea37 100644 (file)
@@ -4797,10 +4797,6 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_VENDOR_ID_IBM, 0x0299,
                0, 0, pbn_b0_bt_2_115200 },
 
-       {       PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
-               0x1000, 0x0012,
-               0, 0, pbn_b0_bt_2_115200 },
-
        {       PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
                0xA000, 0x1000,
                0, 0, pbn_b0_1_115200 },
index 80fe91e64a527da06e82d9cf237be64a26faf662..a1ba94d648859ecc8c722877ff38fffaf5ce1c96 100644 (file)
@@ -12,9 +12,8 @@ config SERIAL_8250
          here are those that are setting up dedicated Ethernet WWW/FTP
          servers, or users that have one of the various bus mice instead of a
          serial mouse and don't intend to use their machine's standard serial
-         port for anything.  (Note that the Cyclades and Stallion multi
-         serial port drivers do not need this driver built in for them to
-         work.)
+         port for anything.  (Note that the Cyclades multi serial port driver
+         does not need this driver built in for it to work.)
 
          To compile this driver as a module, choose M here: the
          module will be called 8250.
index 7e7006fd404e62000b19b728274c0abf8a51bf38..46dd1c72feda3e6207cba2335d4a0406b76d05e5 100644 (file)
@@ -551,7 +551,7 @@ config BFIN_UART3_CTSRTS
          Enable hardware flow control in the driver.
 
 config SERIAL_IMX
-       bool "IMX serial port support"
+       tristate "IMX serial port support"
        depends on ARCH_MXC
        select SERIAL_CORE
        select RATIONAL
@@ -561,22 +561,21 @@ config SERIAL_IMX
 
 config SERIAL_IMX_CONSOLE
        bool "Console on IMX serial port"
-       depends on SERIAL_IMX
+       depends on SERIAL_IMX=y
        select SERIAL_CORE_CONSOLE
        help
-         If you have enabled the serial port on the Motorola IMX
+         If you have enabled the serial port on the Freescale IMX
          CPU you can make it the console by answering Y to this option.
 
          Even if you say Y here, the currently visible virtual console
          (/dev/tty0) will still be used as the system console by default, but
          you can alter that using a kernel command line option such as
-         "console=ttySA0". (Try "man bootparam" or see the documentation of
-         your boot loader (lilo or loadlin) about how to pass options to the
-         kernel at boot time.)
+         "console=ttymxc0". (Try "man bootparam" or see the documentation of
+         your bootloader about how to pass options to the kernel at boot time.)
 
 config SERIAL_UARTLITE
        tristate "Xilinx uartlite serial port support"
-       depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE
+       depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE || ARCH_ZYNQ
        select SERIAL_CORE
        help
          Say Y here if you want to use the Xilinx uartlite serial controller.
@@ -1484,6 +1483,20 @@ config SERIAL_RP2_NR_UARTS
          If multiple cards are present, the default limit of 32 ports may
          need to be increased.
 
+config SERIAL_FSL_LPUART
+       tristate "Freescale lpuart serial port support"
+       select SERIAL_CORE
+       help
+         Support for the on-chip lpuart on some Freescale SoCs.
+
+config SERIAL_FSL_LPUART_CONSOLE
+       bool "Console on Freescale lpuart serial port"
+       depends on SERIAL_FSL_LPUART=y
+       select SERIAL_CORE_CONSOLE
+       help
+         If you have enabled the lpuart serial port on the Freescale SoCs,
+         you can make it the console by answering Y to this option.
+
 endmenu
 
 endif # TTY
index eedfec40e3dda242220cf1dc73b9ec1787c63cee..cf650f0cd6e4175adb72d9139c278dc33ec7b4fa 100644 (file)
@@ -85,3 +85,4 @@ obj-$(CONFIG_SERIAL_AR933X)   += ar933x_uart.o
 obj-$(CONFIG_SERIAL_EFM32_UART) += efm32-uart.o
 obj-$(CONFIG_SERIAL_ARC)       += arc_uart.o
 obj-$(CONFIG_SERIAL_RP2)       += rp2.o
+obj-$(CONFIG_SERIAL_FSL_LPUART)        += fsl_lpuart.o
index 13471dd95793f39a6b8b4c96a5c5381c4327d8ca..1d46966e2a65cc742c8d8920fe8004d4bcad3a48 100644 (file)
@@ -604,7 +604,6 @@ static int altera_uart_remove(struct platform_device *pdev)
 
        if (port) {
                uart_remove_one_port(&altera_uart_driver, port);
-               platform_set_drvdata(pdev, NULL);
                port->mapbase = 0;
        }
 
index e2774f9ecd59f16915e0647028e643e616b9c4cb..ad41319d1d9b028eaeba3d0dcd08db5e745c0b78 100644 (file)
@@ -79,13 +79,12 @@ struct vendor_data {
        bool                    dma_threshold;
        bool                    cts_event_workaround;
 
-       unsigned int (*get_fifosize)(unsigned int periphid);
+       unsigned int (*get_fifosize)(struct amba_device *dev);
 };
 
-static unsigned int get_fifosize_arm(unsigned int periphid)
+static unsigned int get_fifosize_arm(struct amba_device *dev)
 {
-       unsigned int rev = (periphid >> 20) & 0xf;
-       return rev < 3 ? 16 : 32;
+       return amba_rev(dev) < 3 ? 16 : 32;
 }
 
 static struct vendor_data vendor_arm = {
@@ -98,7 +97,7 @@ static struct vendor_data vendor_arm = {
        .get_fifosize           = get_fifosize_arm,
 };
 
-static unsigned int get_fifosize_st(unsigned int periphid)
+static unsigned int get_fifosize_st(struct amba_device *dev)
 {
        return 64;
 }
@@ -2157,7 +2156,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
        uap->lcrh_rx = vendor->lcrh_rx;
        uap->lcrh_tx = vendor->lcrh_tx;
        uap->old_cr = 0;
-       uap->fifosize = vendor->get_fifosize(dev->periphid);
+       uap->fifosize = vendor->get_fifosize(dev);
        uap->port.dev = &dev->dev;
        uap->port.mapbase = dev->res.start;
        uap->port.membase = base;
index 3467462869ce2dce88ca8832002d7f294d5eaae6..691265faebbe608c555573513d4735be18c7d8e8 100644 (file)
@@ -1100,7 +1100,7 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
                 * Enable the peripheral clock for this serial port.
                 * This is called on uart_open() or a resume event.
                 */
-               clk_enable(atmel_port->clk);
+               clk_prepare_enable(atmel_port->clk);
 
                /* re-enable interrupts if we disabled some on suspend */
                UART_PUT_IER(port, atmel_port->backup_imr);
@@ -1114,7 +1114,7 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
                 * Disable the peripheral clock for this serial port.
                 * This is called on uart_close() or a suspend event.
                 */
-               clk_disable(atmel_port->clk);
+               clk_disable_unprepare(atmel_port->clk);
                break;
        default:
                printk(KERN_ERR "atmel_serial: unknown pm %d\n", state);
@@ -1458,9 +1458,10 @@ static void atmel_of_init_port(struct atmel_uart_port *atmel_port,
 /*
  * Configure the port from the platform device resource info.
  */
-static void atmel_init_port(struct atmel_uart_port *atmel_port,
+static int atmel_init_port(struct atmel_uart_port *atmel_port,
                                      struct platform_device *pdev)
 {
+       int ret;
        struct uart_port *port = &atmel_port->uart;
        struct atmel_uart_data *pdata = pdev->dev.platform_data;
 
@@ -1496,9 +1497,19 @@ static void atmel_init_port(struct atmel_uart_port *atmel_port,
        /* for console, the clock could already be configured */
        if (!atmel_port->clk) {
                atmel_port->clk = clk_get(&pdev->dev, "usart");
-               clk_enable(atmel_port->clk);
+               if (IS_ERR(atmel_port->clk)) {
+                       ret = PTR_ERR(atmel_port->clk);
+                       atmel_port->clk = NULL;
+                       return ret;
+               }
+               ret = clk_prepare_enable(atmel_port->clk);
+               if (ret) {
+                       clk_put(atmel_port->clk);
+                       atmel_port->clk = NULL;
+                       return ret;
+               }
                port->uartclk = clk_get_rate(atmel_port->clk);
-               clk_disable(atmel_port->clk);
+               clk_disable_unprepare(atmel_port->clk);
                /* only enable clock when USART is in use */
        }
 
@@ -1511,6 +1522,8 @@ static void atmel_init_port(struct atmel_uart_port *atmel_port,
        } else {
                atmel_port->tx_done_mask = ATMEL_US_TXRDY;
        }
+
+       return 0;
 }
 
 struct platform_device *atmel_default_console_device;  /* the serial console device */
@@ -1601,6 +1614,7 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
 
 static int __init atmel_console_setup(struct console *co, char *options)
 {
+       int ret;
        struct uart_port *port = &atmel_ports[co->index].uart;
        int baud = 115200;
        int bits = 8;
@@ -1612,7 +1626,9 @@ static int __init atmel_console_setup(struct console *co, char *options)
                return -ENODEV;
        }
 
-       clk_enable(atmel_ports[co->index].clk);
+       ret = clk_prepare_enable(atmel_ports[co->index].clk);
+       if (ret)
+               return ret;
 
        UART_PUT_IDR(port, -1);
        UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
@@ -1645,6 +1661,7 @@ static struct console atmel_console = {
  */
 static int __init atmel_console_init(void)
 {
+       int ret;
        if (atmel_default_console_device) {
                struct atmel_uart_data *pdata =
                        atmel_default_console_device->dev.platform_data;
@@ -1655,7 +1672,9 @@ static int __init atmel_console_init(void)
                port->uart.line = id;
 
                add_preferred_console(ATMEL_DEVICENAME, id, NULL);
-               atmel_init_port(port, atmel_default_console_device);
+               ret = atmel_init_port(port, atmel_default_console_device);
+               if (ret)
+                       return ret;
                register_console(&atmel_console);
        }
 
@@ -1786,7 +1805,9 @@ static int atmel_serial_probe(struct platform_device *pdev)
        port->backup_imr = 0;
        port->uart.line = ret;
 
-       atmel_init_port(port, pdev);
+       ret = atmel_init_port(port, pdev);
+       if (ret)
+               goto err;
 
        pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
        if (IS_ERR(pinctrl)) {
@@ -1812,9 +1833,9 @@ static int atmel_serial_probe(struct platform_device *pdev)
                        && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
                /*
                 * The serial core enabled the clock for us, so undo
-                * the clk_enable() in atmel_console_setup()
+                * the clk_prepare_enable() in atmel_console_setup()
                 */
-               clk_disable(port->clk);
+               clk_disable_unprepare(port->clk);
        }
 #endif
 
index 97f4e1858649f42d4bc126431927325962f5bae1..f7672cae53212103c9283b2ff595d83a7f63ff17 100644 (file)
@@ -1384,7 +1384,7 @@ static int cpm_uart_probe(struct platform_device *ofdev)
        if (index >= UART_NR)
                return -ENODEV;
 
-       dev_set_drvdata(&ofdev->dev, pinfo);
+       platform_set_drvdata(ofdev, pinfo);
 
        /* initialize the device pointer for the port */
        pinfo->port.dev = &ofdev->dev;
@@ -1398,7 +1398,7 @@ static int cpm_uart_probe(struct platform_device *ofdev)
 
 static int cpm_uart_remove(struct platform_device *ofdev)
 {
-       struct uart_cpm_port *pinfo = dev_get_drvdata(&ofdev->dev);
+       struct uart_cpm_port *pinfo = platform_get_drvdata(ofdev);
        return uart_remove_one_port(&cpm_reg, &pinfo->port);
 }
 
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
new file mode 100644 (file)
index 0000000..263cfaa
--- /dev/null
@@ -0,0 +1,874 @@
+/*
+ *  Freescale lpuart serial port driver
+ *
+ *  Copyright 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#if defined(CONFIG_SERIAL_FSL_LPUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/console.h>
+#include <linux/serial_core.h>
+#include <linux/tty_flip.h>
+
+/* All registers are 8-bit width */
+#define UARTBDH                        0x00
+#define UARTBDL                        0x01
+#define UARTCR1                        0x02
+#define UARTCR2                        0x03
+#define UARTSR1                        0x04
+#define UARTCR3                        0x06
+#define UARTDR                 0x07
+#define UARTCR4                        0x0a
+#define UARTCR5                        0x0b
+#define UARTMODEM              0x0d
+#define UARTPFIFO              0x10
+#define UARTCFIFO              0x11
+#define UARTSFIFO              0x12
+#define UARTTWFIFO             0x13
+#define UARTTCFIFO             0x14
+#define UARTRWFIFO             0x15
+
+#define UARTBDH_LBKDIE         0x80
+#define UARTBDH_RXEDGIE                0x40
+#define UARTBDH_SBR_MASK       0x1f
+
+#define UARTCR1_LOOPS          0x80
+#define UARTCR1_RSRC           0x20
+#define UARTCR1_M              0x10
+#define UARTCR1_WAKE           0x08
+#define UARTCR1_ILT            0x04
+#define UARTCR1_PE             0x02
+#define UARTCR1_PT             0x01
+
+#define UARTCR2_TIE            0x80
+#define UARTCR2_TCIE           0x40
+#define UARTCR2_RIE            0x20
+#define UARTCR2_ILIE           0x10
+#define UARTCR2_TE             0x08
+#define UARTCR2_RE             0x04
+#define UARTCR2_RWU            0x02
+#define UARTCR2_SBK            0x01
+
+#define UARTSR1_TDRE           0x80
+#define UARTSR1_TC             0x40
+#define UARTSR1_RDRF           0x20
+#define UARTSR1_IDLE           0x10
+#define UARTSR1_OR             0x08
+#define UARTSR1_NF             0x04
+#define UARTSR1_FE             0x02
+#define UARTSR1_PE             0x01
+
+#define UARTCR3_R8             0x80
+#define UARTCR3_T8             0x40
+#define UARTCR3_TXDIR          0x20
+#define UARTCR3_TXINV          0x10
+#define UARTCR3_ORIE           0x08
+#define UARTCR3_NEIE           0x04
+#define UARTCR3_FEIE           0x02
+#define UARTCR3_PEIE           0x01
+
+#define UARTCR4_MAEN1          0x80
+#define UARTCR4_MAEN2          0x40
+#define UARTCR4_M10            0x20
+#define UARTCR4_BRFA_MASK      0x1f
+#define UARTCR4_BRFA_OFF       0
+
+#define UARTCR5_TDMAS          0x80
+#define UARTCR5_RDMAS          0x20
+
+#define UARTMODEM_RXRTSE       0x08
+#define UARTMODEM_TXRTSPOL     0x04
+#define UARTMODEM_TXRTSE       0x02
+#define UARTMODEM_TXCTSE       0x01
+
+#define UARTPFIFO_TXFE         0x80
+#define UARTPFIFO_FIFOSIZE_MASK        0x7
+#define UARTPFIFO_TXSIZE_OFF   4
+#define UARTPFIFO_RXFE         0x08
+#define UARTPFIFO_RXSIZE_OFF   0
+
+#define UARTCFIFO_TXFLUSH      0x80
+#define UARTCFIFO_RXFLUSH      0x40
+#define UARTCFIFO_RXOFE                0x04
+#define UARTCFIFO_TXOFE                0x02
+#define UARTCFIFO_RXUFE                0x01
+
+#define UARTSFIFO_TXEMPT       0x80
+#define UARTSFIFO_RXEMPT       0x40
+#define UARTSFIFO_RXOF         0x04
+#define UARTSFIFO_TXOF         0x02
+#define UARTSFIFO_RXUF         0x01
+
+#define DRIVER_NAME    "fsl-lpuart"
+#define DEV_NAME       "ttyLP"
+#define UART_NR                6
+
+struct lpuart_port {
+       struct uart_port        port;
+       struct clk              *clk;
+       unsigned int            txfifo_size;
+       unsigned int            rxfifo_size;
+};
+
+static struct of_device_id lpuart_dt_ids[] = {
+       {
+               .compatible = "fsl,vf610-lpuart",
+       },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
+
+static void lpuart_stop_tx(struct uart_port *port)
+{
+       unsigned char temp;
+
+       temp = readb(port->membase + UARTCR2);
+       temp &= ~(UARTCR2_TIE | UARTCR2_TCIE);
+       writeb(temp, port->membase + UARTCR2);
+}
+
+static void lpuart_stop_rx(struct uart_port *port)
+{
+       unsigned char temp;
+
+       temp = readb(port->membase + UARTCR2);
+       writeb(temp & ~UARTCR2_RE, port->membase + UARTCR2);
+}
+
+static void lpuart_enable_ms(struct uart_port *port)
+{
+}
+
+static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
+{
+       struct circ_buf *xmit = &sport->port.state->xmit;
+
+       while (!uart_circ_empty(xmit) &&
+               (readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size)) {
+               writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR);
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               sport->port.icount.tx++;
+       }
+
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(&sport->port);
+
+       if (uart_circ_empty(xmit))
+               lpuart_stop_tx(&sport->port);
+}
+
+static void lpuart_start_tx(struct uart_port *port)
+{
+       struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+       unsigned char temp;
+
+       temp = readb(port->membase + UARTCR2);
+       writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
+
+       if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
+               lpuart_transmit_buffer(sport);
+}
+
+static irqreturn_t lpuart_txint(int irq, void *dev_id)
+{
+       struct lpuart_port *sport = dev_id;
+       struct circ_buf *xmit = &sport->port.state->xmit;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sport->port.lock, flags);
+       if (sport->port.x_char) {
+               writeb(sport->port.x_char, sport->port.membase + UARTDR);
+               goto out;
+       }
+
+       if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
+               lpuart_stop_tx(&sport->port);
+               goto out;
+       }
+
+       lpuart_transmit_buffer(sport);
+
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(&sport->port);
+
+out:
+       spin_unlock_irqrestore(&sport->port.lock, flags);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t lpuart_rxint(int irq, void *dev_id)
+{
+       struct lpuart_port *sport = dev_id;
+       unsigned int flg, ignored = 0;
+       struct tty_port *port = &sport->port.state->port;
+       unsigned long flags;
+       unsigned char rx, sr;
+
+       spin_lock_irqsave(&sport->port.lock, flags);
+
+       while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
+               flg = TTY_NORMAL;
+               sport->port.icount.rx++;
+               /*
+                * to clear the FE, OR, NF and PE flags,
+                * read SR1 then read DR
+                */
+               sr = readb(sport->port.membase + UARTSR1);
+               rx = readb(sport->port.membase + UARTDR);
+
+               if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
+                       continue;
+
+               if (sr & (UARTSR1_PE | UARTSR1_OR | UARTSR1_FE)) {
+                       if (sr & UARTSR1_PE)
+                               sport->port.icount.parity++;
+                       else if (sr & UARTSR1_FE)
+                               sport->port.icount.frame++;
+
+                       if (sr & UARTSR1_OR)
+                               sport->port.icount.overrun++;
+
+                       if (sr & sport->port.ignore_status_mask) {
+                               if (++ignored > 100)
+                                       goto out;
+                               continue;
+                       }
+
+                       sr &= sport->port.read_status_mask;
+
+                       if (sr & UARTSR1_PE)
+                               flg = TTY_PARITY;
+                       else if (sr & UARTSR1_FE)
+                               flg = TTY_FRAME;
+
+                       if (sr & UARTSR1_OR)
+                               flg = TTY_OVERRUN;
+
+#ifdef SUPPORT_SYSRQ
+                       sport->port.sysrq = 0;
+#endif
+               }
+
+               tty_insert_flip_char(port, rx, flg);
+       }
+
+out:
+       spin_unlock_irqrestore(&sport->port.lock, flags);
+
+       tty_flip_buffer_push(port);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t lpuart_int(int irq, void *dev_id)
+{
+       struct lpuart_port *sport = dev_id;
+       unsigned char sts;
+
+       sts = readb(sport->port.membase + UARTSR1);
+
+       if (sts & UARTSR1_RDRF)
+               lpuart_rxint(irq, dev_id);
+
+       if (sts & UARTSR1_TDRE &&
+               !(readb(sport->port.membase + UARTCR5) & UARTCR5_TDMAS))
+               lpuart_txint(irq, dev_id);
+
+       return IRQ_HANDLED;
+}
+
+/* return TIOCSER_TEMT when transmitter is not busy */
+static unsigned int lpuart_tx_empty(struct uart_port *port)
+{
+       return (readb(port->membase + UARTSR1) & UARTSR1_TC) ?
+               TIOCSER_TEMT : 0;
+}
+
+static unsigned int lpuart_get_mctrl(struct uart_port *port)
+{
+       unsigned int temp = 0;
+       unsigned char reg;
+
+       reg = readb(port->membase + UARTMODEM);
+       if (reg & UARTMODEM_TXCTSE)
+               temp |= TIOCM_CTS;
+
+       if (reg & UARTMODEM_RXRTSE)
+               temp |= TIOCM_RTS;
+
+       return temp;
+}
+
+static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+       unsigned char temp;
+
+       temp = readb(port->membase + UARTMODEM) &
+                       ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
+
+       if (mctrl & TIOCM_RTS)
+               temp |= UARTMODEM_RXRTSE;
+
+       if (mctrl & TIOCM_CTS)
+               temp |= UARTMODEM_TXCTSE;
+
+       writeb(temp, port->membase + UARTMODEM);
+}
+
+static void lpuart_break_ctl(struct uart_port *port, int break_state)
+{
+       unsigned char temp;
+
+       temp = readb(port->membase + UARTCR2) & ~UARTCR2_SBK;
+
+       if (break_state != 0)
+               temp |= UARTCR2_SBK;
+
+       writeb(temp, port->membase + UARTCR2);
+}
+
+static void lpuart_setup_watermark(struct lpuart_port *sport)
+{
+       unsigned char val, cr2;
+
+       cr2 = readb(sport->port.membase + UARTCR2);
+       cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_TE |
+                       UARTCR2_RIE | UARTCR2_RE);
+       writeb(cr2, sport->port.membase + UARTCR2);
+
+       /* determine FIFO size and enable FIFO mode */
+       val = readb(sport->port.membase + UARTPFIFO);
+
+       sport->txfifo_size = 0x1 << (((val >> UARTPFIFO_TXSIZE_OFF) &
+               UARTPFIFO_FIFOSIZE_MASK) + 1);
+
+       sport->rxfifo_size = 0x1 << (((val >> UARTPFIFO_RXSIZE_OFF) &
+               UARTPFIFO_FIFOSIZE_MASK) + 1);
+
+       writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
+                       sport->port.membase + UARTPFIFO);
+
+       /* flush Tx and Rx FIFO */
+       writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
+                       sport->port.membase + UARTCFIFO);
+
+       writeb(2, sport->port.membase + UARTTWFIFO);
+       writeb(1, sport->port.membase + UARTRWFIFO);
+}
+
+static int lpuart_startup(struct uart_port *port)
+{
+       struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+       int ret;
+       unsigned long flags;
+       unsigned char temp;
+
+       ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
+                               DRIVER_NAME, sport);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&sport->port.lock, flags);
+
+       lpuart_setup_watermark(sport);
+
+       temp = readb(sport->port.membase + UARTCR2);
+       temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE);
+       writeb(temp, sport->port.membase + UARTCR2);
+
+       spin_unlock_irqrestore(&sport->port.lock, flags);
+       return 0;
+}
+
+static void lpuart_shutdown(struct uart_port *port)
+{
+       struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+       unsigned char temp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&port->lock, flags);
+
+       /* disable Rx/Tx and interrupts */
+       temp = readb(port->membase + UARTCR2);
+       temp &= ~(UARTCR2_TE | UARTCR2_RE |
+                       UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
+       writeb(temp, port->membase + UARTCR2);
+
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       devm_free_irq(port->dev, port->irq, sport);
+}
+
+static void
+lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+                  struct ktermios *old)
+{
+       struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+       unsigned long flags;
+       unsigned char cr1, old_cr1, old_cr2, cr4, bdh, modem;
+       unsigned int  baud;
+       unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
+       unsigned int sbr, brfa;
+
+       cr1 = old_cr1 = readb(sport->port.membase + UARTCR1);
+       old_cr2 = readb(sport->port.membase + UARTCR2);
+       cr4 = readb(sport->port.membase + UARTCR4);
+       bdh = readb(sport->port.membase + UARTBDH);
+       modem = readb(sport->port.membase + UARTMODEM);
+       /*
+        * only support CS8 and CS7, and for CS7 must enable PE.
+        * supported mode:
+        *  - (7,e/o,1)
+        *  - (8,n,1)
+        *  - (8,m/s,1)
+        *  - (8,e/o,1)
+        */
+       while ((termios->c_cflag & CSIZE) != CS8 &&
+               (termios->c_cflag & CSIZE) != CS7) {
+               termios->c_cflag &= ~CSIZE;
+               termios->c_cflag |= old_csize;
+               old_csize = CS8;
+       }
+
+       if ((termios->c_cflag & CSIZE) == CS8 ||
+               (termios->c_cflag & CSIZE) == CS7)
+               cr1 = old_cr1 & ~UARTCR1_M;
+
+       if (termios->c_cflag & CMSPAR) {
+               if ((termios->c_cflag & CSIZE) != CS8) {
+                       termios->c_cflag &= ~CSIZE;
+                       termios->c_cflag |= CS8;
+               }
+               cr1 |= UARTCR1_M;
+       }
+
+       if (termios->c_cflag & CRTSCTS) {
+               modem |= (UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
+       } else {
+               termios->c_cflag &= ~CRTSCTS;
+               modem &= ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
+       }
+
+       if (termios->c_cflag & CSTOPB)
+               termios->c_cflag &= ~CSTOPB;
+
+       /* parity must be enabled with CS7 to match the 8-bit format */
+       if ((termios->c_cflag & CSIZE) == CS7)
+               termios->c_cflag |= PARENB;
+
+       if ((termios->c_cflag & PARENB)) {
+               if (termios->c_cflag & CMSPAR) {
+                       cr1 &= ~UARTCR1_PE;
+                       cr1 |= UARTCR1_M;
+               } else {
+                       cr1 |= UARTCR1_PE;
+                       if ((termios->c_cflag & CSIZE) == CS8)
+                               cr1 |= UARTCR1_M;
+                       if (termios->c_cflag & PARODD)
+                               cr1 |= UARTCR1_PT;
+                       else
+                               cr1 &= ~UARTCR1_PT;
+               }
+       }
+
+       /* ask the core to calculate the divisor */
+       baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+
+       spin_lock_irqsave(&sport->port.lock, flags);
+
+       sport->port.read_status_mask = 0;
+       if (termios->c_iflag & INPCK)
+               sport->port.read_status_mask |= (UARTSR1_FE | UARTSR1_PE);
+       if (termios->c_iflag & (BRKINT | PARMRK))
+               sport->port.read_status_mask |= UARTSR1_FE;
+
+       /* characters to ignore */
+       sport->port.ignore_status_mask = 0;
+       if (termios->c_iflag & IGNPAR)
+               sport->port.ignore_status_mask |= UARTSR1_PE;
+       if (termios->c_iflag & IGNBRK) {
+               sport->port.ignore_status_mask |= UARTSR1_FE;
+               /*
+                * if we're ignoring parity and break indicators,
+                * ignore overruns too (for real raw support).
+                */
+               if (termios->c_iflag & IGNPAR)
+                       sport->port.ignore_status_mask |= UARTSR1_OR;
+       }
+
+       /* update the per-port timeout */
+       uart_update_timeout(port, termios->c_cflag, baud);
+
+       /* wait for the transmit engine to complete */
+       while (!(readb(sport->port.membase + UARTSR1) & UARTSR1_TC))
+               barrier();
+
+       /* disable transmit and receive */
+       writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE),
+                       sport->port.membase + UARTCR2);
+
+       sbr = sport->port.uartclk / (16 * baud);
+       brfa = ((sport->port.uartclk - (16 * sbr * baud)) * 2) / baud;
+       bdh &= ~UARTBDH_SBR_MASK;
+       bdh |= (sbr >> 8) & 0x1F;
+       cr4 &= ~UARTCR4_BRFA_MASK;
+       brfa &= UARTCR4_BRFA_MASK;
+       writeb(cr4 | brfa, sport->port.membase + UARTCR4);
+       writeb(bdh, sport->port.membase + UARTBDH);
+       writeb(sbr & 0xFF, sport->port.membase + UARTBDL);
+       writeb(cr1, sport->port.membase + UARTCR1);
+       writeb(modem, sport->port.membase + UARTMODEM);
+
+       /* restore control register */
+       writeb(old_cr2, sport->port.membase + UARTCR2);
+
+       spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static const char *lpuart_type(struct uart_port *port)
+{
+       return "FSL_LPUART";
+}
+
+static void lpuart_release_port(struct uart_port *port)
+{
+       /* nothing to do */
+}
+
+static int lpuart_request_port(struct uart_port *port)
+{
+       return  0;
+}
+
+/* configure/autoconfigure the port */
+static void lpuart_config_port(struct uart_port *port, int flags)
+{
+       if (flags & UART_CONFIG_TYPE)
+               port->type = PORT_LPUART;
+}
+
+static int lpuart_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+       int ret = 0;
+
+       if (ser->type != PORT_UNKNOWN && ser->type != PORT_LPUART)
+               ret = -EINVAL;
+       if (port->irq != ser->irq)
+               ret = -EINVAL;
+       if (ser->io_type != UPIO_MEM)
+               ret = -EINVAL;
+       if (port->uartclk / 16 != ser->baud_base)
+               ret = -EINVAL;
+       if (port->iobase != ser->port)
+               ret = -EINVAL;
+       if (ser->hub6 != 0)
+               ret = -EINVAL;
+       return ret;
+}
+
+static struct uart_ops lpuart_pops = {
+       .tx_empty       = lpuart_tx_empty,
+       .set_mctrl      = lpuart_set_mctrl,
+       .get_mctrl      = lpuart_get_mctrl,
+       .stop_tx        = lpuart_stop_tx,
+       .start_tx       = lpuart_start_tx,
+       .stop_rx        = lpuart_stop_rx,
+       .enable_ms      = lpuart_enable_ms,
+       .break_ctl      = lpuart_break_ctl,
+       .startup        = lpuart_startup,
+       .shutdown       = lpuart_shutdown,
+       .set_termios    = lpuart_set_termios,
+       .type           = lpuart_type,
+       .request_port   = lpuart_request_port,
+       .release_port   = lpuart_release_port,
+       .config_port    = lpuart_config_port,
+       .verify_port    = lpuart_verify_port,
+};
+
+static struct lpuart_port *lpuart_ports[UART_NR];
+
+#ifdef CONFIG_SERIAL_FSL_LPUART_CONSOLE
+static void lpuart_console_putchar(struct uart_port *port, int ch)
+{
+       while (!(readb(port->membase + UARTSR1) & UARTSR1_TDRE))
+               barrier();
+
+       writeb(ch, port->membase + UARTDR);
+}
+
+static void
+lpuart_console_write(struct console *co, const char *s, unsigned int count)
+{
+       struct lpuart_port *sport = lpuart_ports[co->index];
+       unsigned char  old_cr2, cr2;
+
+       /* first save CR2 and then disable interrupts */
+       cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
+       cr2 |= (UARTCR2_TE |  UARTCR2_RE);
+       cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
+       writeb(cr2, sport->port.membase + UARTCR2);
+
+       uart_console_write(&sport->port, s, count, lpuart_console_putchar);
+
+       /* wait for the transmitter to finish, then restore CR2 */
+       while (!(readb(sport->port.membase + UARTSR1) & UARTSR1_TC))
+               barrier();
+
+       writeb(old_cr2, sport->port.membase + UARTCR2);
+}
+
+/*
+ * if the port was already initialised (e.g. by a boot loader),
+ * try to determine the current setup.
+ */
+static void __init
+lpuart_console_get_options(struct lpuart_port *sport, int *baud,
+                          int *parity, int *bits)
+{
+       unsigned char cr, bdh, bdl, brfa;
+       unsigned int sbr, uartclk, baud_raw;
+
+       cr = readb(sport->port.membase + UARTCR2);
+       cr &= UARTCR2_TE | UARTCR2_RE;
+       if (!cr)
+               return;
+
+       /* ok, the port was enabled */
+
+       cr = readb(sport->port.membase + UARTCR1);
+
+       *parity = 'n';
+       if (cr & UARTCR1_PE) {
+               if (cr & UARTCR1_PT)
+                       *parity = 'o';
+               else
+                       *parity = 'e';
+       }
+
+       if (cr & UARTCR1_M)
+               *bits = 9;
+       else
+               *bits = 8;
+
+       bdh = readb(sport->port.membase + UARTBDH);
+       bdh &= UARTBDH_SBR_MASK;
+       bdl = readb(sport->port.membase + UARTBDL);
+       sbr = bdh;
+       sbr <<= 8;
+       sbr |= bdl;
+       brfa = readb(sport->port.membase + UARTCR4);
+       brfa &= UARTCR4_BRFA_MASK;
+
+       uartclk = clk_get_rate(sport->clk);
+       /*
+        * baud = mod_clk / (16 * (sbr + brfa / 32)), where sbr is the 13-bit divisor
+        */
+       baud_raw = uartclk / (16 * (sbr + brfa / 32));
+
+       if (*baud != baud_raw)
+               printk(KERN_INFO "Serial: Console lpuart rounded baud rate "
+                               "from %d to %d\n", baud_raw, *baud);
+}
+
+static int __init lpuart_console_setup(struct console *co, char *options)
+{
+       struct lpuart_port *sport;
+       int baud = 115200;
+       int bits = 8;
+       int parity = 'n';
+       int flow = 'n';
+
+       /*
+        * check whether an invalid uart number has been specified, and
+        * if so, search for the first available port that does have
+        * console support.
+        */
+       if (co->index == -1 || co->index >= ARRAY_SIZE(lpuart_ports))
+               co->index = 0;
+
+       sport = lpuart_ports[co->index];
+       if (sport == NULL)
+               return -ENODEV;
+
+       if (options)
+               uart_parse_options(options, &baud, &parity, &bits, &flow);
+       else
+               lpuart_console_get_options(sport, &baud, &parity, &bits);
+
+       lpuart_setup_watermark(sport);
+
+       return uart_set_options(&sport->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver lpuart_reg;
+static struct console lpuart_console = {
+       .name           = DEV_NAME,
+       .write          = lpuart_console_write,
+       .device         = uart_console_device,
+       .setup          = lpuart_console_setup,
+       .flags          = CON_PRINTBUFFER,
+       .index          = -1,
+       .data           = &lpuart_reg,
+};
+
+#define LPUART_CONSOLE (&lpuart_console)
+#else
+#define LPUART_CONSOLE NULL
+#endif
+
+static struct uart_driver lpuart_reg = {
+       .owner          = THIS_MODULE,
+       .driver_name    = DRIVER_NAME,
+       .dev_name       = DEV_NAME,
+       .nr             = ARRAY_SIZE(lpuart_ports),
+       .cons           = LPUART_CONSOLE,
+};
+
+static int lpuart_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct lpuart_port *sport;
+       struct resource *res;
+       int ret;
+
+       sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
+       if (!sport)
+               return -ENOMEM;
+
+       pdev->dev.coherent_dma_mask = 0;
+
+       ret = of_alias_get_id(np, "serial");
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+               return ret;
+       }
+       sport->port.line = ret;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       sport->port.mapbase = res->start;
+       sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(sport->port.membase))
+               return PTR_ERR(sport->port.membase);
+
+       sport->port.dev = &pdev->dev;
+       sport->port.type = PORT_LPUART;
+       sport->port.iotype = UPIO_MEM;
+       sport->port.irq = platform_get_irq(pdev, 0);
+       sport->port.ops = &lpuart_pops;
+       sport->port.flags = UPF_BOOT_AUTOCONF;
+
+       sport->clk = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(sport->clk)) {
+               ret = PTR_ERR(sport->clk);
+               dev_err(&pdev->dev, "failed to get uart clk: %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(sport->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to enable uart clk: %d\n", ret);
+               return ret;
+       }
+
+       sport->port.uartclk = clk_get_rate(sport->clk);
+
+       lpuart_ports[sport->port.line] = sport;
+
+       platform_set_drvdata(pdev, &sport->port);
+
+       ret = uart_add_one_port(&lpuart_reg, &sport->port);
+       if (ret) {
+               clk_disable_unprepare(sport->clk);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int lpuart_remove(struct platform_device *pdev)
+{
+       struct lpuart_port *sport = platform_get_drvdata(pdev);
+
+       uart_remove_one_port(&lpuart_reg, &sport->port);
+
+       clk_disable_unprepare(sport->clk);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int lpuart_suspend(struct device *dev)
+{
+       struct lpuart_port *sport = dev_get_drvdata(dev);
+
+       uart_suspend_port(&lpuart_reg, &sport->port);
+
+       return 0;
+}
+
+static int lpuart_resume(struct device *dev)
+{
+       struct lpuart_port *sport = dev_get_drvdata(dev);
+
+       uart_resume_port(&lpuart_reg, &sport->port);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(lpuart_pm_ops, lpuart_suspend, lpuart_resume);
+
+static struct platform_driver lpuart_driver = {
+       .probe          = lpuart_probe,
+       .remove         = lpuart_remove,
+       .driver         = {
+               .name   = "fsl-lpuart",
+               .owner  = THIS_MODULE,
+               .of_match_table = lpuart_dt_ids,
+               .pm     = &lpuart_pm_ops,
+       },
+};
+
+static int __init lpuart_serial_init(void)
+{
+       int ret;
+
+       pr_info("serial: Freescale lpuart driver\n");
+
+       ret = uart_register_driver(&lpuart_reg);
+       if (ret)
+               return ret;
+
+       ret = platform_driver_register(&lpuart_driver);
+       if (ret)
+               uart_unregister_driver(&lpuart_reg);
+
+       return 0;
+}
+
+static void __exit lpuart_serial_exit(void)
+{
+       platform_driver_unregister(&lpuart_driver);
+       uart_unregister_driver(&lpuart_reg);
+}
+
+module_init(lpuart_serial_init);
+module_exit(lpuart_serial_exit);
+
+MODULE_DESCRIPTION("Freescale lpuart serial port driver");
+MODULE_LICENSE("GPL v2");
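A stand-alone user-space sketch of exercising the new ttyLP device with one of the line settings set_termios() supports, 115200 (8,n,1) without flow control (the device node index and payload are illustrative, not part of the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
            struct termios tio;
            int fd = open("/dev/ttyLP0", O_RDWR | O_NOCTTY);

            if (fd < 0)
                    return 1;
            memset(&tio, 0, sizeof(tio));
            tio.c_cflag = CS8 | CREAD | CLOCAL;     /* (8,n,1), no CRTSCTS */
            cfsetospeed(&tio, B115200);
            cfsetispeed(&tio, B115200);
            if (tcsetattr(fd, TCSANOW, &tio) < 0)
                    return 1;
            write(fd, "hello\r\n", 7);
            close(fd);
            return 0;
    }
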
index 8cdfbd365892146611b772bbfb217a8f48938c3a..415cec62073fbc61d9770e6a8c613a42f2ba6865 100644 (file)
@@ -201,6 +201,7 @@ struct imx_port {
        unsigned int            old_status;
        int                     txirq, rxirq, rtsirq;
        unsigned int            have_rtscts:1;
+       unsigned int            dte_mode:1;
        unsigned int            use_irda:1;
        unsigned int            irda_inv_rx:1;
        unsigned int            irda_inv_tx:1;
@@ -271,6 +272,7 @@ static inline int is_imx21_uart(struct imx_port *sport)
 /*
  * Save and restore functions for UCR1, UCR2 and UCR3 registers
  */
+#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_IMX_CONSOLE)
 static void imx_port_ucrs_save(struct uart_port *port,
                               struct imx_port_ucrs *ucr)
 {
@@ -288,6 +290,7 @@ static void imx_port_ucrs_restore(struct uart_port *port,
        writel(ucr->ucr2, port->membase + UCR2);
        writel(ucr->ucr3, port->membase + UCR3);
 }
+#endif
 
 /*
  * Handle any change of modem status signal since we were last called.
@@ -449,6 +452,13 @@ static void imx_start_tx(struct uart_port *port)
                temp &= ~(UCR1_RRDYEN);
                writel(temp, sport->port.membase + UCR1);
        }
+       /* Clear any pending ORE flag before enabling interrupt */
+       temp = readl(sport->port.membase + USR2);
+       writel(temp | USR2_ORE, sport->port.membase + USR2);
+
+       temp = readl(sport->port.membase + UCR4);
+       temp |= UCR4_OREN;
+       writel(temp, sport->port.membase + UCR4);
 
        temp = readl(sport->port.membase + UCR1);
        writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
@@ -582,6 +592,7 @@ static irqreturn_t imx_int(int irq, void *dev_id)
 {
        struct imx_port *sport = dev_id;
        unsigned int sts;
+       unsigned int sts2;
 
        sts = readl(sport->port.membase + USR1);
 
@@ -598,6 +609,13 @@ static irqreturn_t imx_int(int irq, void *dev_id)
        if (sts & USR1_AWAKE)
                writel(USR1_AWAKE, sport->port.membase + USR1);
 
+       sts2 = readl(sport->port.membase + USR2);
+       if (sts2 & USR2_ORE) {
+               dev_err(sport->port.dev, "Rx FIFO overrun\n");
+               sport->port.icount.overrun++;
+               writel(sts2 | USR2_ORE, sport->port.membase + USR2);
+       }
+
        return IRQ_HANDLED;
 }
 
@@ -684,6 +702,17 @@ static int imx_startup(struct uart_port *port)
        int retval;
        unsigned long flags, temp;
 
+       if (!uart_console(port)) {
+               retval = clk_prepare_enable(sport->clk_per);
+               if (retval)
+                       goto error_out1;
+               retval = clk_prepare_enable(sport->clk_ipg);
+               if (retval) {
+                       clk_disable_unprepare(sport->clk_per);
+                       goto error_out1;
+               }
+       }
+
        imx_setup_ufcr(sport, 0);
 
        /* disable the DREN bit (Data Ready interrupt enable) before
@@ -871,6 +900,11 @@ static void imx_shutdown(struct uart_port *port)
 
        writel(temp, sport->port.membase + UCR1);
        spin_unlock_irqrestore(&sport->port.lock, flags);
+
+       if (!uart_console(&sport->port)) {
+               clk_disable_unprepare(sport->clk_per);
+               clk_disable_unprepare(sport->clk_ipg);
+       }
 }
 
 static void
@@ -1007,6 +1041,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
 
        ufcr = readl(sport->port.membase + UFCR);
        ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
+       if (sport->dte_mode)
+               ufcr |= UFCR_DCEDTE;
        writel(ufcr, sport->port.membase + UFCR);
 
        writel(num, sport->port.membase + UBIR);
@@ -1431,6 +1467,9 @@ static int serial_imx_probe_dt(struct imx_port *sport,
        if (of_get_property(np, "fsl,irda-mode", NULL))
                sport->use_irda = 1;
 
+       if (of_get_property(np, "fsl,dte-mode", NULL))
+               sport->dte_mode = 1;
+
        sport->devdata = of_id->data;
 
        return 0;
@@ -1544,6 +1583,11 @@ static int serial_imx_probe(struct platform_device *pdev)
                goto deinit;
        platform_set_drvdata(pdev, sport);
 
+       if (!uart_console(&sport->port)) {
+               clk_disable_unprepare(sport->clk_per);
+               clk_disable_unprepare(sport->clk_ipg);
+       }
+
        return 0;
 deinit:
        if (pdata && pdata->exit)
@@ -1565,9 +1609,6 @@ static int serial_imx_remove(struct platform_device *pdev)
 
        uart_remove_one_port(&imx_reg, &sport->port);
 
-       clk_disable_unprepare(sport->clk_per);
-       clk_disable_unprepare(sport->clk_ipg);
-
        if (pdata && pdata->exit)
                pdata->exit(pdev);
 
index 5f4765a7a5c54d6d3b845c9e9f7df402978afbff..e266eca0ec7642d26dfeae72d70d0170b7308fc0 100644 (file)
  *    be triggered
  */
 
+#if defined(CONFIG_SERIAL_MFD_HSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/console.h>
index f51b280f3bf280b882130ec92b499be65937eed2..e1280a20b7a2f4c840e400039d79a40196ae7e5a 100644 (file)
@@ -84,16 +84,6 @@ static void mpc52xx_uart_of_enumerate(void);
 static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id);
 static irqreturn_t mpc5xxx_uart_process_int(struct uart_port *port);
 
-
-/* Simple macro to test if a port is console or not. This one is taken
- * for serial_core.c and maybe should be moved to serial_core.h ? */
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-#define uart_console(port) \
-       ((port)->cons && (port)->cons->index == (port)->line)
-#else
-#define uart_console(port)     (0)
-#endif
-
 /* ======================================================================== */
 /* PSC fifo operations for isolating differences between 52xx and 512x      */
 /* ======================================================================== */
@@ -122,6 +112,15 @@ struct psc_ops {
        void            (*fifoc_uninit)(void);
        void            (*get_irq)(struct uart_port *, struct device_node *);
        irqreturn_t     (*handle_irq)(struct uart_port *port);
+       u16             (*get_status)(struct uart_port *port);
+       u8              (*get_ipcr)(struct uart_port *port);
+       void            (*command)(struct uart_port *port, u8 cmd);
+       void            (*set_mode)(struct uart_port *port, u8 mr1, u8 mr2);
+       void            (*set_rts)(struct uart_port *port, int state);
+       void            (*enable_ms)(struct uart_port *port);
+       void            (*set_sicr)(struct uart_port *port, u32 val);
+       void            (*set_imr)(struct uart_port *port, u16 val);
+       u8              (*get_mr1)(struct uart_port *port);
 };
 
 /* setting the prescaler and divisor reg is common for all chips */
@@ -134,6 +133,65 @@ static inline void mpc52xx_set_divisor(struct mpc52xx_psc __iomem *psc,
        out_8(&psc->ctlr, divisor & 0xff);
 }
 
+static u16 mpc52xx_psc_get_status(struct uart_port *port)
+{
+       return in_be16(&PSC(port)->mpc52xx_psc_status);
+}
+
+static u8 mpc52xx_psc_get_ipcr(struct uart_port *port)
+{
+       return in_8(&PSC(port)->mpc52xx_psc_ipcr);
+}
+
+static void mpc52xx_psc_command(struct uart_port *port, u8 cmd)
+{
+       out_8(&PSC(port)->command, cmd);
+}
+
+static void mpc52xx_psc_set_mode(struct uart_port *port, u8 mr1, u8 mr2)
+{
+       out_8(&PSC(port)->command, MPC52xx_PSC_SEL_MODE_REG_1);
+       out_8(&PSC(port)->mode, mr1);
+       out_8(&PSC(port)->mode, mr2);
+}
+
+static void mpc52xx_psc_set_rts(struct uart_port *port, int state)
+{
+       if (state)
+               out_8(&PSC(port)->op1, MPC52xx_PSC_OP_RTS);
+       else
+               out_8(&PSC(port)->op0, MPC52xx_PSC_OP_RTS);
+}
+
+static void mpc52xx_psc_enable_ms(struct uart_port *port)
+{
+       struct mpc52xx_psc __iomem *psc = PSC(port);
+
+       /* clear D_*-bits by reading them */
+       in_8(&psc->mpc52xx_psc_ipcr);
+       /* enable CTS and DCD as IPC interrupts */
+       out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
+
+       port->read_status_mask |= MPC52xx_PSC_IMR_IPC;
+       out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_set_sicr(struct uart_port *port, u32 val)
+{
+       out_be32(&PSC(port)->sicr, val);
+}
+
+static void mpc52xx_psc_set_imr(struct uart_port *port, u16 val)
+{
+       out_be16(&PSC(port)->mpc52xx_psc_imr, val);
+}
+
+static u8 mpc52xx_psc_get_mr1(struct uart_port *port)
+{
+       out_8(&PSC(port)->command, MPC52xx_PSC_SEL_MODE_REG_1);
+       return in_8(&PSC(port)->mode);
+}
+
 #ifdef CONFIG_PPC_MPC52xx
 #define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
 static void mpc52xx_psc_fifo_init(struct uart_port *port)
@@ -304,6 +362,15 @@ static struct psc_ops mpc52xx_psc_ops = {
        .set_baudrate = mpc5200_psc_set_baudrate,
        .get_irq = mpc52xx_psc_get_irq,
        .handle_irq = mpc52xx_psc_handle_irq,
+       .get_status = mpc52xx_psc_get_status,
+       .get_ipcr = mpc52xx_psc_get_ipcr,
+       .command = mpc52xx_psc_command,
+       .set_mode = mpc52xx_psc_set_mode,
+       .set_rts = mpc52xx_psc_set_rts,
+       .enable_ms = mpc52xx_psc_enable_ms,
+       .set_sicr = mpc52xx_psc_set_sicr,
+       .set_imr = mpc52xx_psc_set_imr,
+       .get_mr1 = mpc52xx_psc_get_mr1,
 };
 
 static struct psc_ops mpc5200b_psc_ops = {
@@ -325,6 +392,15 @@ static struct psc_ops mpc5200b_psc_ops = {
        .set_baudrate = mpc5200b_psc_set_baudrate,
        .get_irq = mpc52xx_psc_get_irq,
        .handle_irq = mpc52xx_psc_handle_irq,
+       .get_status = mpc52xx_psc_get_status,
+       .get_ipcr = mpc52xx_psc_get_ipcr,
+       .command = mpc52xx_psc_command,
+       .set_mode = mpc52xx_psc_set_mode,
+       .set_rts = mpc52xx_psc_set_rts,
+       .enable_ms = mpc52xx_psc_enable_ms,
+       .set_sicr = mpc52xx_psc_set_sicr,
+       .set_imr = mpc52xx_psc_set_imr,
+       .get_mr1 = mpc52xx_psc_get_mr1,
 };
 
 #endif /* CONFIG_MPC52xx */
@@ -572,6 +648,246 @@ static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np)
        port->irqflags = IRQF_SHARED;
        port->irq = psc_fifoc_irq;
 }
+#endif
+
+#ifdef CONFIG_PPC_MPC512x
+
+#define PSC_5125(port) ((struct mpc5125_psc __iomem *)((port)->membase))
+#define FIFO_5125(port) ((struct mpc512x_psc_fifo __iomem *)(PSC_5125(port)+1))
+
+static void mpc5125_psc_fifo_init(struct uart_port *port)
+{
+       /* /32 prescaler */
+       out_8(&PSC_5125(port)->mpc52xx_psc_clock_select, 0xdd);
+
+       out_be32(&FIFO_5125(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE);
+       out_be32(&FIFO_5125(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
+       out_be32(&FIFO_5125(port)->txalarm, 1);
+       out_be32(&FIFO_5125(port)->tximr, 0);
+
+       out_be32(&FIFO_5125(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE);
+       out_be32(&FIFO_5125(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
+       out_be32(&FIFO_5125(port)->rxalarm, 1);
+       out_be32(&FIFO_5125(port)->rximr, 0);
+
+       out_be32(&FIFO_5125(port)->tximr, MPC512x_PSC_FIFO_ALARM);
+       out_be32(&FIFO_5125(port)->rximr, MPC512x_PSC_FIFO_ALARM);
+}
+
+static int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
+{
+       return !(in_be32(&FIFO_5125(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
+}
+
+static int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
+{
+       return !(in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_FULL);
+}
+
+static int mpc5125_psc_rx_rdy(struct uart_port *port)
+{
+       return in_be32(&FIFO_5125(port)->rxsr) &
+              in_be32(&FIFO_5125(port)->rximr) & MPC512x_PSC_FIFO_ALARM;
+}
+
+static int mpc5125_psc_tx_rdy(struct uart_port *port)
+{
+       return in_be32(&FIFO_5125(port)->txsr) &
+              in_be32(&FIFO_5125(port)->tximr) & MPC512x_PSC_FIFO_ALARM;
+}
+
+static int mpc5125_psc_tx_empty(struct uart_port *port)
+{
+       return in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_EMPTY;
+}
+
+static void mpc5125_psc_stop_rx(struct uart_port *port)
+{
+       unsigned long rx_fifo_imr;
+
+       rx_fifo_imr = in_be32(&FIFO_5125(port)->rximr);
+       rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
+       out_be32(&FIFO_5125(port)->rximr, rx_fifo_imr);
+}
+
+static void mpc5125_psc_start_tx(struct uart_port *port)
+{
+       unsigned long tx_fifo_imr;
+
+       tx_fifo_imr = in_be32(&FIFO_5125(port)->tximr);
+       tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM;
+       out_be32(&FIFO_5125(port)->tximr, tx_fifo_imr);
+}
+
+static void mpc5125_psc_stop_tx(struct uart_port *port)
+{
+       unsigned long tx_fifo_imr;
+
+       tx_fifo_imr = in_be32(&FIFO_5125(port)->tximr);
+       tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
+       out_be32(&FIFO_5125(port)->tximr, tx_fifo_imr);
+}
+
+static void mpc5125_psc_rx_clr_irq(struct uart_port *port)
+{
+       out_be32(&FIFO_5125(port)->rxisr, in_be32(&FIFO_5125(port)->rxisr));
+}
+
+static void mpc5125_psc_tx_clr_irq(struct uart_port *port)
+{
+       out_be32(&FIFO_5125(port)->txisr, in_be32(&FIFO_5125(port)->txisr));
+}
+
+static void mpc5125_psc_write_char(struct uart_port *port, unsigned char c)
+{
+       out_8(&FIFO_5125(port)->txdata_8, c);
+}
+
+static unsigned char mpc5125_psc_read_char(struct uart_port *port)
+{
+       return in_8(&FIFO_5125(port)->rxdata_8);
+}
+
+static void mpc5125_psc_cw_disable_ints(struct uart_port *port)
+{
+       port->read_status_mask =
+               in_be32(&FIFO_5125(port)->tximr) << 16 |
+               in_be32(&FIFO_5125(port)->rximr);
+       out_be32(&FIFO_5125(port)->tximr, 0);
+       out_be32(&FIFO_5125(port)->rximr, 0);
+}
+
+static void mpc5125_psc_cw_restore_ints(struct uart_port *port)
+{
+       out_be32(&FIFO_5125(port)->tximr,
+               (port->read_status_mask >> 16) & 0x7f);
+       out_be32(&FIFO_5125(port)->rximr, port->read_status_mask & 0x7f);
+}
+
+static inline void mpc5125_set_divisor(struct mpc5125_psc __iomem *psc,
+               u8 prescaler, unsigned int divisor)
+{
+       /* select prescaler */
+       out_8(&psc->mpc52xx_psc_clock_select, prescaler);
+       out_8(&psc->ctur, divisor >> 8);
+       out_8(&psc->ctlr, divisor & 0xff);
+}
+
+static unsigned int mpc5125_psc_set_baudrate(struct uart_port *port,
+                                            struct ktermios *new,
+                                            struct ktermios *old)
+{
+       unsigned int baud;
+       unsigned int divisor;
+
+       /*
+        * Calculate with a /16 prescaler here.
+        */
+
+       /* uartclk contains the ips freq */
+       baud = uart_get_baud_rate(port, new, old,
+                                 port->uartclk / (16 * 0xffff) + 1,
+                                 port->uartclk / 16);
+       divisor = (port->uartclk + 8 * baud) / (16 * baud);
+
+       /* enable the /16 prescaler and set the divisor */
+       mpc5125_set_divisor(PSC_5125(port), 0xdd, divisor);
+       return baud;
+}
+
+/*
+ * The MPC5125 has a compatible PSC FIFO controller,
+ * so no special init is needed.
+ */
+static u16 mpc5125_psc_get_status(struct uart_port *port)
+{
+       return in_be16(&PSC_5125(port)->mpc52xx_psc_status);
+}
+
+static u8 mpc5125_psc_get_ipcr(struct uart_port *port)
+{
+       return in_8(&PSC_5125(port)->mpc52xx_psc_ipcr);
+}
+
+static void mpc5125_psc_command(struct uart_port *port, u8 cmd)
+{
+       out_8(&PSC_5125(port)->command, cmd);
+}
+
+static void mpc5125_psc_set_mode(struct uart_port *port, u8 mr1, u8 mr2)
+{
+       out_8(&PSC_5125(port)->mr1, mr1);
+       out_8(&PSC_5125(port)->mr2, mr2);
+}
+
+static void mpc5125_psc_set_rts(struct uart_port *port, int state)
+{
+       if (state & TIOCM_RTS)
+               out_8(&PSC_5125(port)->op1, MPC52xx_PSC_OP_RTS);
+       else
+               out_8(&PSC_5125(port)->op0, MPC52xx_PSC_OP_RTS);
+}
+
+static void mpc5125_psc_enable_ms(struct uart_port *port)
+{
+       struct mpc5125_psc __iomem *psc = PSC_5125(port);
+
+       /* clear D_*-bits by reading them */
+       in_8(&psc->mpc52xx_psc_ipcr);
+       /* enable CTS and DCD as IPC interrupts */
+       out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
+
+       port->read_status_mask |= MPC52xx_PSC_IMR_IPC;
+       out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc5125_psc_set_sicr(struct uart_port *port, u32 val)
+{
+       out_be32(&PSC_5125(port)->sicr, val);
+}
+
+static void mpc5125_psc_set_imr(struct uart_port *port, u16 val)
+{
+       out_be16(&PSC_5125(port)->mpc52xx_psc_imr, val);
+}
+
+static u8 mpc5125_psc_get_mr1(struct uart_port *port)
+{
+       return in_8(&PSC_5125(port)->mr1);
+}
+
+static struct psc_ops mpc5125_psc_ops = {
+       .fifo_init = mpc5125_psc_fifo_init,
+       .raw_rx_rdy = mpc5125_psc_raw_rx_rdy,
+       .raw_tx_rdy = mpc5125_psc_raw_tx_rdy,
+       .rx_rdy = mpc5125_psc_rx_rdy,
+       .tx_rdy = mpc5125_psc_tx_rdy,
+       .tx_empty = mpc5125_psc_tx_empty,
+       .stop_rx = mpc5125_psc_stop_rx,
+       .start_tx = mpc5125_psc_start_tx,
+       .stop_tx = mpc5125_psc_stop_tx,
+       .rx_clr_irq = mpc5125_psc_rx_clr_irq,
+       .tx_clr_irq = mpc5125_psc_tx_clr_irq,
+       .write_char = mpc5125_psc_write_char,
+       .read_char = mpc5125_psc_read_char,
+       .cw_disable_ints = mpc5125_psc_cw_disable_ints,
+       .cw_restore_ints = mpc5125_psc_cw_restore_ints,
+       .set_baudrate = mpc5125_psc_set_baudrate,
+       .clock = mpc512x_psc_clock,
+       .fifoc_init = mpc512x_psc_fifoc_init,
+       .fifoc_uninit = mpc512x_psc_fifoc_uninit,
+       .get_irq = mpc512x_psc_get_irq,
+       .handle_irq = mpc512x_psc_handle_irq,
+       .get_status = mpc5125_psc_get_status,
+       .get_ipcr = mpc5125_psc_get_ipcr,
+       .command = mpc5125_psc_command,
+       .set_mode = mpc5125_psc_set_mode,
+       .set_rts = mpc5125_psc_set_rts,
+       .enable_ms = mpc5125_psc_enable_ms,
+       .set_sicr = mpc5125_psc_set_sicr,
+       .set_imr = mpc5125_psc_set_imr,
+       .get_mr1 = mpc5125_psc_get_mr1,
+};
 
 static struct psc_ops mpc512x_psc_ops = {
        .fifo_init = mpc512x_psc_fifo_init,
@@ -595,8 +911,18 @@ static struct psc_ops mpc512x_psc_ops = {
        .fifoc_uninit = mpc512x_psc_fifoc_uninit,
        .get_irq = mpc512x_psc_get_irq,
        .handle_irq = mpc512x_psc_handle_irq,
+       .get_status = mpc52xx_psc_get_status,
+       .get_ipcr = mpc52xx_psc_get_ipcr,
+       .command = mpc52xx_psc_command,
+       .set_mode = mpc52xx_psc_set_mode,
+       .set_rts = mpc52xx_psc_set_rts,
+       .enable_ms = mpc52xx_psc_enable_ms,
+       .set_sicr = mpc52xx_psc_set_sicr,
+       .set_imr = mpc52xx_psc_set_imr,
+       .get_mr1 = mpc52xx_psc_get_mr1,
 };
-#endif
+#endif /* CONFIG_PPC_MPC512x */
+
 
 static const struct psc_ops *psc_ops;
 
@@ -613,17 +939,14 @@ mpc52xx_uart_tx_empty(struct uart_port *port)
 static void
 mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
-       if (mctrl & TIOCM_RTS)
-               out_8(&PSC(port)->op1, MPC52xx_PSC_OP_RTS);
-       else
-               out_8(&PSC(port)->op0, MPC52xx_PSC_OP_RTS);
+       psc_ops->set_rts(port, mctrl & TIOCM_RTS);
 }
 
 static unsigned int
 mpc52xx_uart_get_mctrl(struct uart_port *port)
 {
        unsigned int ret = TIOCM_DSR;
-       u8 status = in_8(&PSC(port)->mpc52xx_psc_ipcr);
+       u8 status = psc_ops->get_ipcr(port);
 
        if (!(status & MPC52xx_PSC_CTS))
                ret |= TIOCM_CTS;
@@ -673,15 +996,7 @@ mpc52xx_uart_stop_rx(struct uart_port *port)
 static void
 mpc52xx_uart_enable_ms(struct uart_port *port)
 {
-       struct mpc52xx_psc __iomem *psc = PSC(port);
-
-       /* clear D_*-bits by reading them */
-       in_8(&psc->mpc52xx_psc_ipcr);
-       /* enable CTS and DCD as IPC interrupts */
-       out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
-
-       port->read_status_mask |= MPC52xx_PSC_IMR_IPC;
-       out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+       psc_ops->enable_ms(port);
 }
 
 static void
@@ -691,9 +1006,9 @@ mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
        spin_lock_irqsave(&port->lock, flags);
 
        if (ctl == -1)
-               out_8(&PSC(port)->command, MPC52xx_PSC_START_BRK);
+               psc_ops->command(port, MPC52xx_PSC_START_BRK);
        else
-               out_8(&PSC(port)->command, MPC52xx_PSC_STOP_BRK);
+               psc_ops->command(port, MPC52xx_PSC_STOP_BRK);
 
        spin_unlock_irqrestore(&port->lock, flags);
 }
@@ -701,7 +1016,6 @@ mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
 static int
 mpc52xx_uart_startup(struct uart_port *port)
 {
-       struct mpc52xx_psc __iomem *psc = PSC(port);
        int ret;
 
        if (psc_ops->clock) {
@@ -717,15 +1031,15 @@ mpc52xx_uart_startup(struct uart_port *port)
                return ret;
 
        /* Reset/activate the port, clear and enable interrupts */
-       out_8(&psc->command, MPC52xx_PSC_RST_RX);
-       out_8(&psc->command, MPC52xx_PSC_RST_TX);
+       psc_ops->command(port, MPC52xx_PSC_RST_RX);
+       psc_ops->command(port, MPC52xx_PSC_RST_TX);
 
-       out_be32(&psc->sicr, 0);        /* UART mode DCD ignored */
+       psc_ops->set_sicr(port, 0);     /* UART mode DCD ignored */
 
        psc_ops->fifo_init(port);
 
-       out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
-       out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
+       psc_ops->command(port, MPC52xx_PSC_TX_ENABLE);
+       psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);
 
        return 0;
 }
@@ -733,19 +1047,20 @@ mpc52xx_uart_startup(struct uart_port *port)
 static void
 mpc52xx_uart_shutdown(struct uart_port *port)
 {
-       struct mpc52xx_psc __iomem *psc = PSC(port);
-
        /* Shut down the port.  Leave TX active if on a console port */
-       out_8(&psc->command, MPC52xx_PSC_RST_RX);
+       psc_ops->command(port, MPC52xx_PSC_RST_RX);
        if (!uart_console(port))
-               out_8(&psc->command, MPC52xx_PSC_RST_TX);
+               psc_ops->command(port, MPC52xx_PSC_RST_TX);
 
        port->read_status_mask = 0;
-       out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+       psc_ops->set_imr(port, port->read_status_mask);
 
        if (psc_ops->clock)
                psc_ops->clock(port, 0);
 
+       /* Disable interrupt */
+       psc_ops->cw_disable_ints(port);
+
        /* Release interrupt */
        free_irq(port->irq, port);
 }
@@ -754,7 +1069,6 @@ static void
 mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
                         struct ktermios *old)
 {
-       struct mpc52xx_psc __iomem *psc = PSC(port);
        unsigned long flags;
        unsigned char mr1, mr2;
        unsigned int j;
@@ -818,13 +1132,11 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
                        "Some chars may have been lost.\n");
 
        /* Reset the TX & RX */
-       out_8(&psc->command, MPC52xx_PSC_RST_RX);
-       out_8(&psc->command, MPC52xx_PSC_RST_TX);
+       psc_ops->command(port, MPC52xx_PSC_RST_RX);
+       psc_ops->command(port, MPC52xx_PSC_RST_TX);
 
        /* Send new mode settings */
-       out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
-       out_8(&psc->mode, mr1);
-       out_8(&psc->mode, mr2);
+       psc_ops->set_mode(port, mr1, mr2);
        baud = psc_ops->set_baudrate(port, new, old);
 
        /* Update the per-port timeout */
@@ -834,8 +1146,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
                mpc52xx_uart_enable_ms(port);
 
        /* Reenable TX & RX */
-       out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
-       out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
+       psc_ops->command(port, MPC52xx_PSC_TX_ENABLE);
+       psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);
 
        /* We're all set, release the lock */
        spin_unlock_irqrestore(&port->lock, flags);
@@ -963,7 +1275,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
                flag = TTY_NORMAL;
                port->icount.rx++;
 
-               status = in_be16(&PSC(port)->mpc52xx_psc_status);
+               status = psc_ops->get_status(port);
 
                if (status & (MPC52xx_PSC_SR_PE |
                              MPC52xx_PSC_SR_FE |
@@ -983,7 +1295,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
                        }
 
                        /* Clear error condition */
-                       out_8(&PSC(port)->command, MPC52xx_PSC_RST_ERR_STAT);
+                       psc_ops->command(port, MPC52xx_PSC_RST_ERR_STAT);
 
                }
                tty_insert_flip_char(tport, ch, flag);
@@ -1066,7 +1378,7 @@ mpc5xxx_uart_process_int(struct uart_port *port)
                if (psc_ops->tx_rdy(port))
                        keepgoing |= mpc52xx_uart_int_tx_chars(port);
 
-               status = in_8(&PSC(port)->mpc52xx_psc_ipcr);
+               status = psc_ops->get_ipcr(port);
                if (status & MPC52xx_PSC_D_DCD)
                        uart_handle_dcd_change(port, !(status & MPC52xx_PSC_DCD));
 
@@ -1107,14 +1419,12 @@ static void __init
 mpc52xx_console_get_options(struct uart_port *port,
                            int *baud, int *parity, int *bits, int *flow)
 {
-       struct mpc52xx_psc __iomem *psc = PSC(port);
        unsigned char mr1;
 
        pr_debug("mpc52xx_console_get_options(port=%p)\n", port);
 
        /* Read the mode registers */
-       out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
-       mr1 = in_8(&psc->mode);
+       mr1 = psc_ops->get_mr1(port);
 
        /* CT{U,L}R are write-only ! */
        *baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
@@ -1304,6 +1614,7 @@ static struct of_device_id mpc52xx_uart_of_match[] = {
 #endif
 #ifdef CONFIG_PPC_MPC512x
        { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, },
+       { .compatible = "fsl,mpc5125-psc-uart", .data = &mpc5125_psc_ops, },
 #endif
        {},
 };
@@ -1372,15 +1683,14 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
        if (ret)
                return ret;
 
-       dev_set_drvdata(&op->dev, (void *)port);
+       platform_set_drvdata(op, (void *)port);
        return 0;
 }
 
 static int
 mpc52xx_uart_of_remove(struct platform_device *op)
 {
-       struct uart_port *port = dev_get_drvdata(&op->dev);
-       dev_set_drvdata(&op->dev, NULL);
+       struct uart_port *port = platform_get_drvdata(op);
 
        if (port)
                uart_remove_one_port(&mpc52xx_uart_driver, port);
@@ -1392,7 +1702,7 @@ mpc52xx_uart_of_remove(struct platform_device *op)
 static int
 mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state)
 {
-       struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
+       struct uart_port *port = (struct uart_port *) platform_get_drvdata(op);
 
        if (port)
                uart_suspend_port(&mpc52xx_uart_driver, port);
@@ -1403,7 +1713,7 @@ mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state)
 static int
 mpc52xx_uart_of_resume(struct platform_device *op)
 {
-       struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
+       struct uart_port *port = (struct uart_port *) platform_get_drvdata(op);
 
        if (port)
                uart_resume_port(&mpc52xx_uart_driver, port);
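
This file's changes route every direct PSC register access through the new struct psc_ops hooks (get_status, get_ipcr, command, set_mode, set_rts, enable_ms, set_sicr, set_imr, get_mr1), so the MPC5125, whose PSC exposes mr1/mr2 directly instead of through the mode-register select command, can share the core driver. A minimal sketch of the indirection; psc_acc and psc_reset_and_configure are hypothetical names standing in for the driver's psc_ops usage:

        #include <linux/serial_core.h>
        #include <asm/mpc52xx_psc.h>

        /* Sketch: per-SoC accessors chosen once at probe from the OF match data. */
        struct psc_accessors {
                void (*command)(struct uart_port *port, u8 cmd);
                void (*set_mode)(struct uart_port *port, u8 mr1, u8 mr2);
        };

        static const struct psc_accessors *psc_acc;     /* plays the role of psc_ops */

        static void psc_reset_and_configure(struct uart_port *port, u8 mr1, u8 mr2)
        {
                psc_acc->command(port, MPC52xx_PSC_RST_RX);
                psc_acc->command(port, MPC52xx_PSC_RST_TX);
                /* 52xx/512x write the mode regs via SEL_MODE_REG_1; 5125 has mr1/mr2 */
                psc_acc->set_mode(port, mr1, mr2);
                psc_acc->command(port, MPC52xx_PSC_TX_ENABLE);
                psc_acc->command(port, MPC52xx_PSC_RX_ENABLE);
        }

For the 5125's /16 path, divisor = (uartclk + 8 * baud) / (16 * baud) rounds to the nearest integer divisor; with a hypothetical 66 MHz ips clock at 115200 baud that gives 36, i.e. an actual rate of roughly 114.6 kbaud.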
index 39c7ea4cb14fb9e95fbc920d86010f39db2aa691..2caf9c6f61499f15738a29c6659d91f1c7fad826 100644 (file)
@@ -204,7 +204,7 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
 
        info->type = port_type;
        info->line = ret;
-       dev_set_drvdata(&ofdev->dev, info);
+       platform_set_drvdata(ofdev, info);
        return 0;
 out:
        kfree(info);
@@ -217,7 +217,7 @@ out:
  */
 static int of_platform_serial_remove(struct platform_device *ofdev)
 {
-       struct of_serial_info *info = dev_get_drvdata(&ofdev->dev);
+       struct of_serial_info *info = platform_get_drvdata(ofdev);
        switch (info->type) {
 #ifdef CONFIG_SERIAL_8250
        case PORT_8250 ... PORT_MAX_8250:
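
Several files in this series (of_serial here, and sc26xx, sunhv, sunsab, sunsu, sunzilog, ucc_uart, xuartps, mpc52xx_uart elsewhere) make the same two-part conversion: dev_set_drvdata(&pdev->dev, p) becomes platform_set_drvdata(pdev, p), and the explicit dev_set_drvdata(..., NULL) on remove is dropped, since the driver core clears drvdata once a device is unbound. A generic sketch of the resulting shape, with made-up names (example_priv, example_probe, example_remove):

        #include <linux/platform_device.h>
        #include <linux/slab.h>

        struct example_priv {
                int id;                         /* placeholder private state */
        };

        /* Sketch: the drvdata handling these hunks converge on. */
        static int example_probe(struct platform_device *pdev)
        {
                struct example_priv *priv;

                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                platform_set_drvdata(pdev, priv);       /* was dev_set_drvdata() */
                return 0;
        }

        static int example_remove(struct platform_device *pdev)
        {
                struct example_priv *priv = platform_get_drvdata(pdev);

                dev_dbg(&pdev->dev, "removing instance %d\n", priv->id);
                /* no dev_set_drvdata(&pdev->dev, NULL); the core clears it on unbind */
                return 0;
        }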
index f0b9f6b52b32cd887ad6b86586f97aa9eec9648c..b6d17287307636dd34afeb3e97fc4a491efe437c 100644 (file)
@@ -161,6 +161,7 @@ struct uart_omap_port {
        u32                     calc_latency;
        struct work_struct      qos_work;
        struct pinctrl          *pins;
+       bool                    is_suspending;
 };
 
 #define to_uart_omap_port(p)   ((container_of((p), struct uart_omap_port, port)))
@@ -197,7 +198,7 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
        struct omap_uart_port_info *pdata = up->dev->platform_data;
 
        if (!pdata || !pdata->get_context_loss_count)
-               return 0;
+               return -EINVAL;
 
        return pdata->get_context_loss_count(up->dev);
 }
@@ -1289,6 +1290,22 @@ static struct uart_driver serial_omap_reg = {
 };
 
 #ifdef CONFIG_PM_SLEEP
+static int serial_omap_prepare(struct device *dev)
+{
+       struct uart_omap_port *up = dev_get_drvdata(dev);
+
+       up->is_suspending = true;
+
+       return 0;
+}
+
+static void serial_omap_complete(struct device *dev)
+{
+       struct uart_omap_port *up = dev_get_drvdata(dev);
+
+       up->is_suspending = false;
+}
+
 static int serial_omap_suspend(struct device *dev)
 {
        struct uart_omap_port *up = dev_get_drvdata(dev);
@@ -1307,7 +1324,10 @@ static int serial_omap_resume(struct device *dev)
 
        return 0;
 }
-#endif
+#else
+#define serial_omap_prepare NULL
+#define serial_omap_complete NULL
+#endif /* CONFIG_PM_SLEEP */
 
 static void omap_serial_fill_features_erratas(struct uart_omap_port *up)
 {
@@ -1482,6 +1502,9 @@ static int serial_omap_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, up);
        pm_runtime_enable(&pdev->dev);
+       if (omap_up_info->autosuspend_timeout == 0)
+               omap_up_info->autosuspend_timeout = -1;
+       device_init_wakeup(up->dev, true);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev,
                        omap_up_info->autosuspend_timeout);
@@ -1591,13 +1614,19 @@ static void serial_omap_restore_context(struct uart_omap_port *up)
 static int serial_omap_runtime_suspend(struct device *dev)
 {
        struct uart_omap_port *up = dev_get_drvdata(dev);
-       struct omap_uart_port_info *pdata = dev->platform_data;
 
        if (!up)
                return -EINVAL;
 
-       if (!pdata)
-               return 0;
+       /*
+        * When using 'no_console_suspend', the console UART must not be
+        * suspended. Since driver suspend is managed by runtime suspend,
+        * preventing runtime suspend (by returning an error) keeps the
+        * device active during suspend.
+        */
+       if (up->is_suspending && !console_suspend_enabled &&
+           uart_console(&up->port))
+               return -EBUSY;
 
        up->context_loss_cnt = serial_omap_get_context_loss_count(up);
 
@@ -1626,7 +1655,7 @@ static int serial_omap_runtime_resume(struct device *dev)
        int loss_cnt = serial_omap_get_context_loss_count(up);
 
        if (loss_cnt < 0) {
-               dev_err(dev, "serial_omap_get_context_loss_count failed : %d\n",
+               dev_dbg(dev, "serial_omap_get_context_loss_count failed : %d\n",
                        loss_cnt);
                serial_omap_restore_context(up);
        } else if (up->context_loss_cnt != loss_cnt) {
@@ -1643,6 +1672,8 @@ static const struct dev_pm_ops serial_omap_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
        SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
                                serial_omap_runtime_resume, NULL)
+       .prepare        = serial_omap_prepare,
+       .complete       = serial_omap_complete,
 };
 
 #if defined(CONFIG_OF)
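
The omap-serial hunks tie system sleep and runtime PM together: .prepare sets is_suspending, .complete clears it, and runtime suspend returns -EBUSY for a console port while no_console_suspend is in effect, so the console UART stays powered across suspend. A condensed sketch of that interlock as it would sit inside omap-serial.c, reusing the hook names from the hunks (the body is illustrative, not the driver's full suspend path):

        #include <linux/console.h>
        #include <linux/pm.h>
        #include <linux/pm_runtime.h>
        #include <linux/serial_core.h>

        /* Sketch: veto runtime suspend for an active console during system sleep.
         * struct uart_omap_port is the driver-private struct gaining the
         * is_suspending flag in the hunk above. */
        static int example_runtime_suspend(struct device *dev)
        {
                struct uart_omap_port *up = dev_get_drvdata(dev);

                if (up->is_suspending && !console_suspend_enabled &&
                    uart_console(&up->port))
                        return -EBUSY;          /* keep the console UART active */

                /* ... context-loss bookkeeping and qos work as in the driver ... */
                return 0;
        }

        static const struct dev_pm_ops example_pm_ops = {
                .prepare  = serial_omap_prepare,        /* sets up->is_suspending */
                .complete = serial_omap_complete,       /* clears it again */
                SET_RUNTIME_PM_OPS(example_runtime_suspend, NULL, NULL)
        };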
index 21a7e179edf36a2ef476553159000e7eef81c913..572d48189de910c632ed12e93b4f603bceb55172 100644 (file)
@@ -217,6 +217,7 @@ enum {
 #define FRI2_64_UARTCLK  64000000 /*  64.0000 MHz */
 #define FRI2_48_UARTCLK  48000000 /*  48.0000 MHz */
 #define NTC1_UARTCLK     64000000 /*  64.0000 MHz */
+#define MINNOW_UARTCLK   50000000 /*  50.0000 MHz */
 
 struct pch_uart_buffer {
        unsigned char *buf;
@@ -398,6 +399,10 @@ static int pch_uart_get_uartclk(void)
                    strstr(cmp, "nanoETXexpress-TT")))
                return NTC1_UARTCLK;
 
+       cmp = dmi_get_system_info(DMI_BOARD_NAME);
+       if (cmp && strstr(cmp, "MinnowBoard"))
+               return MINNOW_UARTCLK;
+
        return DEFAULT_UARTCLK;
 }
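
The MinnowBoard addition follows the file's existing pattern of choosing the UART input clock from DMI strings; a small sketch of that lookup using the constant from the hunk (example_get_uartclk is a made-up name, and the DEFAULT_UARTCLK value here is assumed for illustration):

        #include <linux/dmi.h>
        #include <linux/string.h>

        #define MINNOW_UARTCLK   50000000       /* 50 MHz, as added above */
        #define DEFAULT_UARTCLK   1843200       /* assumed fallback for this sketch */

        /* Sketch: pick the UART clock by matching the DMI board name. */
        static int example_get_uartclk(void)
        {
                const char *board = dmi_get_system_info(DMI_BOARD_NAME);

                if (board && strstr(board, "MinnowBoard"))
                        return MINNOW_UARTCLK;

                return DEFAULT_UARTCLK;
        }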
 
index 0c8a9fa2be6cee12182bb8e32e0ed37be1c8fdab..81ebc86a9e23480fe2498093e96a44429f1d880a 100644 (file)
@@ -1811,7 +1811,13 @@ static int __init s3c24xx_serial_modinit(void)
                return ret;
        }
 
-       return platform_driver_register(&samsung_serial_driver);
+       ret = platform_driver_register(&samsung_serial_driver);
+       if (ret < 0) {
+               pr_err("Failed to register platform driver\n");
+               uart_unregister_driver(&s3c24xx_uart_drv);
+       }
+
+       return ret;
 }
 
 static void __exit s3c24xx_serial_modexit(void)
index c9735680762d87fc3362dd62e1eab528e1bcb027..4b1434d53e9d6ddbe9c478f0631379c527abf31a 100644 (file)
@@ -696,7 +696,7 @@ static int sc26xx_probe(struct platform_device *dev)
        if (err)
                goto out_remove_ports;
 
-       dev_set_drvdata(&dev->dev, up);
+       platform_set_drvdata(dev, up);
        return 0;
 
 out_remove_ports:
@@ -716,7 +716,7 @@ out_free_port:
 
 static int __exit sc26xx_driver_remove(struct platform_device *dev)
 {
-       struct uart_sc26xx_port *up = dev_get_drvdata(&dev->dev);
+       struct uart_sc26xx_port *up = platform_get_drvdata(dev);
 
        free_irq(up->port[0].irq, up);
 
@@ -728,7 +728,6 @@ static int __exit sc26xx_driver_remove(struct platform_device *dev)
        kfree(up);
        sc26xx_port = NULL;
 
-       dev_set_drvdata(&dev->dev, NULL);
        return 0;
 }
 
index f87dbfd3277047a9c9d6d2ef6fdd7c3487a32f93..28cdd2829139270bbd5dd2b9624481c6bd32b23c 100644 (file)
@@ -50,12 +50,6 @@ static struct lock_class_key port_lock_key;
 
 #define HIGH_BITS_OFFSET       ((sizeof(long)-sizeof(int))*8)
 
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
-#define uart_console(port)     ((port)->cons && (port)->cons->index == (port)->line)
-#else
-#define uart_console(port)     (0)
-#endif
-
 static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
                                        struct ktermios *old_termios);
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
index 03465b673945c0c76d65596bb2e50bec010f31e9..1fd564b8194b7affe59be4ae0d3b827829c7ba8a 100644 (file)
@@ -687,9 +687,10 @@ int sirfsoc_uart_probe(struct platform_device *pdev)
 
        if (sirfport->hw_flow_ctrl) {
                sirfport->p = pinctrl_get_select_default(&pdev->dev);
-               ret = IS_ERR(sirfport->p);
-               if (ret)
+               if (IS_ERR(sirfport->p)) {
+                       ret = PTR_ERR(sirfport->p);
                        goto err;
+               }
        }
 
        sirfport->clk = clk_get(&pdev->dev, NULL);
index ba60708053e0f740920d89b6fe76dd68666d8bd1..cf86e729532b9fc11b768863c94bfb9fe1da7109 100644 (file)
@@ -577,7 +577,7 @@ static int hv_probe(struct platform_device *op)
        if (err)
                goto out_remove_port;
 
-       dev_set_drvdata(&op->dev, port);
+       platform_set_drvdata(op, port);
 
        return 0;
 
@@ -601,7 +601,7 @@ out_free_port:
 
 static int hv_remove(struct platform_device *dev)
 {
-       struct uart_port *port = dev_get_drvdata(&dev->dev);
+       struct uart_port *port = platform_get_drvdata(dev);
 
        free_irq(port->irq, port);
 
@@ -612,8 +612,6 @@ static int hv_remove(struct platform_device *dev)
        kfree(port);
        sunhv_port = NULL;
 
-       dev_set_drvdata(&dev->dev, NULL);
-
        return 0;
 }
 
index a422c8b55a47b427b34ec09a3bda5b563ea6d8a0..5d6136b2a04a8c488e609696952e5517d5cc412a 100644 (file)
@@ -1037,7 +1037,7 @@ static int sab_probe(struct platform_device *op)
        if (err)
                goto out3;
 
-       dev_set_drvdata(&op->dev, &up[0]);
+       platform_set_drvdata(op, &up[0]);
 
        inst++;
 
@@ -1059,7 +1059,7 @@ out:
 
 static int sab_remove(struct platform_device *op)
 {
-       struct uart_sunsab_port *up = dev_get_drvdata(&op->dev);
+       struct uart_sunsab_port *up = platform_get_drvdata(op);
 
        uart_remove_one_port(&sunsab_reg, &up[1].port);
        uart_remove_one_port(&sunsab_reg, &up[0].port);
@@ -1070,8 +1070,6 @@ static int sab_remove(struct platform_device *op)
                   up[0].port.membase,
                   sizeof(union sab82532_async_regs));
 
-       dev_set_drvdata(&op->dev, NULL);
-
        return 0;
 }
 
index 0d8465728473304ed26a66877536c7cf002f22a4..699cc1b5f6aa2c5c39cd89a2bb8ea5b192140015 100644 (file)
@@ -1454,7 +1454,7 @@ static int su_probe(struct platform_device *op)
                        kfree(up);
                        return err;
                }
-               dev_set_drvdata(&op->dev, up);
+               platform_set_drvdata(op, up);
 
                nr_inst++;
 
@@ -1483,7 +1483,7 @@ static int su_probe(struct platform_device *op)
        if (err)
                goto out_unmap;
 
-       dev_set_drvdata(&op->dev, up);
+       platform_set_drvdata(op, up);
 
        nr_inst++;
 
@@ -1496,7 +1496,7 @@ out_unmap:
 
 static int su_remove(struct platform_device *op)
 {
-       struct uart_sunsu_port *up = dev_get_drvdata(&op->dev);
+       struct uart_sunsu_port *up = platform_get_drvdata(op);
        bool kbdms = false;
 
        if (up->su_type == SU_PORT_MS ||
@@ -1516,8 +1516,6 @@ static int su_remove(struct platform_device *op)
        if (kbdms)
                kfree(up);
 
-       dev_set_drvdata(&op->dev, NULL);
-
        return 0;
 }
 
index 813ef8eb8effaa5b6b2d9116dc3c2a4c43cba4f5..135a1520353260e8f6665538ccce381e3036c562 100644 (file)
@@ -1495,7 +1495,7 @@ static int zs_probe(struct platform_device *op)
                kbm_inst++;
        }
 
-       dev_set_drvdata(&op->dev, &up[0]);
+       platform_set_drvdata(op, &up[0]);
 
        return 0;
 }
@@ -1512,7 +1512,7 @@ static void zs_remove_one(struct uart_sunzilog_port *up)
 
 static int zs_remove(struct platform_device *op)
 {
-       struct uart_sunzilog_port *up = dev_get_drvdata(&op->dev);
+       struct uart_sunzilog_port *up = platform_get_drvdata(op);
        struct zilog_layout __iomem *regs;
 
        zs_remove_one(&up[0]);
@@ -1521,8 +1521,6 @@ static int zs_remove(struct platform_device *op)
        regs = sunzilog_chip_regs[up[0].port.line / 2];
        of_iounmap(&op->resource[0], regs, sizeof(struct zilog_layout));
 
-       dev_set_drvdata(&op->dev, NULL);
-
        return 0;
 }
 
index 7355303dad9968082da43b8b84fc3f745dda11a6..88317482b81fce4929dc2556e1b19358c6156017 100644 (file)
@@ -1451,7 +1451,7 @@ static int ucc_uart_probe(struct platform_device *ofdev)
                goto out_np;
        }
 
-       dev_set_drvdata(&ofdev->dev, qe_port);
+       platform_set_drvdata(ofdev, qe_port);
 
        dev_info(&ofdev->dev, "UCC%u assigned to /dev/ttyQE%u\n",
                qe_port->ucc_num + 1, qe_port->port.line);
@@ -1471,13 +1471,12 @@ out_free:
 
 static int ucc_uart_remove(struct platform_device *ofdev)
 {
-       struct uart_qe_port *qe_port = dev_get_drvdata(&ofdev->dev);
+       struct uart_qe_port *qe_port = platform_get_drvdata(ofdev);
 
        dev_info(&ofdev->dev, "removing /dev/ttyQE%u\n", qe_port->port.line);
 
        uart_remove_one_port(&ucc_uart_driver, &qe_port->port);
 
-       dev_set_drvdata(&ofdev->dev, NULL);
        kfree(qe_port);
 
        return 0;
@@ -1518,9 +1517,11 @@ static int __init ucc_uart_init(void)
        }
 
        ret = platform_driver_register(&ucc_uart_of_driver);
-       if (ret)
+       if (ret) {
                printk(KERN_ERR
                       "ucc-uart: could not register platform driver\n");
+               uart_unregister_driver(&ucc_uart_driver);
+       }
 
        return ret;
 }
index 1a8bc2275ea4f190e9d994cb7e3786069f168a7e..48af43de34677d17343cc7e95b99f54cb5d821a9 100644 (file)
@@ -648,7 +648,7 @@ static struct platform_driver vt8500_platform_driver = {
        .driver = {
                .name = "vt8500_serial",
                .owner = THIS_MODULE,
-               .of_match_table = of_match_ptr(wmt_dt_ids),
+               .of_match_table = wmt_dt_ids,
        },
 };
 
index 4e5c77834c50fc6586848f9b292b81456b481da6..6c9174530422953a2b1294ab0c1cf55d3e128882 100644 (file)
@@ -974,12 +974,11 @@ static int xuartps_probe(struct platform_device *pdev)
                port->dev = &pdev->dev;
                port->uartclk = clk_get_rate(clk);
                port->private_data = clk;
-               dev_set_drvdata(&pdev->dev, port);
+               platform_set_drvdata(pdev, port);
                rc = uart_add_one_port(&xuartps_uart_driver, port);
                if (rc) {
                        dev_err(&pdev->dev,
                                "uart_add_one_port() failed; err=%i\n", rc);
-                       dev_set_drvdata(&pdev->dev, NULL);
                        return rc;
                }
                return 0;
@@ -994,46 +993,17 @@ static int xuartps_probe(struct platform_device *pdev)
  **/
 static int xuartps_remove(struct platform_device *pdev)
 {
-       struct uart_port *port = dev_get_drvdata(&pdev->dev);
+       struct uart_port *port = platform_get_drvdata(pdev);
        struct clk *clk = port->private_data;
        int rc;
 
        /* Remove the xuartps port from the serial core */
        rc = uart_remove_one_port(&xuartps_uart_driver, port);
-       dev_set_drvdata(&pdev->dev, NULL);
        port->mapbase = 0;
        clk_disable_unprepare(clk);
        return rc;
 }
 
-/**
- * xuartps_suspend - suspend event
- * @pdev: Pointer to the platform device structure
- * @state: State of the device
- *
- * Returns 0
- **/
-static int xuartps_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       /* Call the API provided in serial_core.c file which handles
-        * the suspend.
-        */
-       uart_suspend_port(&xuartps_uart_driver, &xuartps_port[pdev->id]);
-       return 0;
-}
-
-/**
- * xuartps_resume - Resume after a previous suspend
- * @pdev: Pointer to the platform device structure
- *
- * Returns 0
- **/
-static int xuartps_resume(struct platform_device *pdev)
-{
-       uart_resume_port(&xuartps_uart_driver, &xuartps_port[pdev->id]);
-       return 0;
-}
-
 /* Match table for of_platform binding */
 static struct of_device_id xuartps_of_match[] = {
        { .compatible = "xlnx,xuartps", },
@@ -1044,8 +1014,6 @@ MODULE_DEVICE_TABLE(of, xuartps_of_match);
 static struct platform_driver xuartps_platform_driver = {
        .probe   = xuartps_probe,               /* Probe method */
        .remove  = xuartps_remove,              /* Detach method */
-       .suspend = xuartps_suspend,             /* Suspend */
-       .resume  = xuartps_resume,              /* Resume after a suspend */
        .driver  = {
                .owner = THIS_MODULE,
                .name = XUARTPS_NAME,           /* Driver name */
index b51c15408ff317980205880dae4961e1db793eb4..5f68f2cfdfd0fe7b263521af8e2f237d8088a27d 100644 (file)
@@ -932,7 +932,7 @@ static int sysrq_reset_seq_param_set(const char *buffer,
        unsigned long val;
        int error;
 
-       error = strict_strtoul(buffer, 0, &val);
+       error = kstrtoul(buffer, 0, &val);
        if (error < 0)
                return error;
 
index 6464029e4860968945840c252ad632b556f9f947..366af832794b54db8ab9b8b7c7f524bdfe11f464 100644 (file)
@@ -1618,6 +1618,8 @@ static void release_tty(struct tty_struct *tty, int idx)
        tty_free_termios(tty);
        tty_driver_remove_tty(tty->driver, tty);
        tty->port->itty = NULL;
+       if (tty->link)
+               tty->link->port->itty = NULL;
        cancel_work_sync(&tty->port->buf.work);
 
        if (tty->link)
@@ -2138,6 +2140,7 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
 static int __tty_fasync(int fd, struct file *filp, int on)
 {
        struct tty_struct *tty = file_tty(filp);
+       struct tty_ldisc *ldisc;
        unsigned long flags;
        int retval = 0;
 
@@ -2148,11 +2151,17 @@ static int __tty_fasync(int fd, struct file *filp, int on)
        if (retval <= 0)
                goto out;
 
+       ldisc = tty_ldisc_ref(tty);
+       if (ldisc) {
+               if (ldisc->ops->fasync)
+                       ldisc->ops->fasync(tty, on);
+               tty_ldisc_deref(ldisc);
+       }
+
        if (on) {
                enum pid_type type;
                struct pid *pid;
-               if (!waitqueue_active(&tty->read_wait))
-                       tty->minimum_to_wake = 1;
+
                spin_lock_irqsave(&tty->ctrl_lock, flags);
                if (tty->pgrp) {
                        pid = tty->pgrp;
@@ -2165,13 +2174,7 @@ static int __tty_fasync(int fd, struct file *filp, int on)
                spin_unlock_irqrestore(&tty->ctrl_lock, flags);
                retval = __f_setown(filp, pid, type, 0);
                put_pid(pid);
-               if (retval)
-                       goto out;
-       } else {
-               if (!tty->fasync && !waitqueue_active(&tty->read_wait))
-                       tty->minimum_to_wake = N_TTY_BUF_SIZE;
        }
-       retval = 0;
 out:
        return retval;
 }
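
The __tty_fasync change above stops poking tty->minimum_to_wake directly and instead takes a line-discipline reference and calls the ldisc's fasync hook, letting N_TTY manage its own wakeup threshold. The take-ref / call-op / drop-ref shape, as a hedged standalone sketch (example_notify_fasync is hypothetical; the fasync ldisc op is the one the hunk invokes):

        #include <linux/tty.h>

        /* Sketch: safely call an optional ldisc hook via ref/deref. */
        static void example_notify_fasync(struct tty_struct *tty, int on)
        {
                struct tty_ldisc *ldisc = tty_ldisc_ref(tty);

                if (ldisc) {
                        if (ldisc->ops->fasync)
                                ldisc->ops->fasync(tty, on);
                        tty_ldisc_deref(ldisc);
                }
        }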
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
new file mode 100644 (file)
index 0000000..22fad8a
--- /dev/null
@@ -0,0 +1,453 @@
+/*
+ * Ldisc rw semaphore
+ *
+ * The ldisc semaphore is semantically a rw_semaphore but which enforces
+ * an alternate policy, namely:
+ *   1) Supports lock wait timeouts
+ *   2) Write waiter has priority
+ *   3) Downgrading is not supported
+ *
+ * Implementation notes:
+ *   1) Upper half of semaphore count is a wait count (differs from rwsem
+ *     in that rwsem normalizes the upper half to the wait bias)
+ *   2) Lacks overflow checking
+ *
+ * The generic counting was copied and modified from include/asm-generic/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ *
+ * The scheduling policy was copied and modified from lib/rwsem.c
+ * Written by David Howells (dhowells@redhat.com).
+ *
+ * This implementation incorporates the write lock stealing work of
+ * Michel Lespinasse <walken@google.com>.
+ *
+ * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
+ *
+ * This file may be redistributed under the terms of the GNU General Public
+ * License v2.
+ */
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/tty.h>
+#include <linux/sched.h>
+
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __acq(l, s, t, r, c, n, i)            \
+                               lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
+# define __rel(l, n, i)                                \
+                               lock_release(&(l)->dep_map, n, i)
+# ifdef CONFIG_PROVE_LOCKING
+#  define lockdep_acquire(l, s, t, i)          __acq(l, s, t, 0, 2, NULL, i)
+#  define lockdep_acquire_nest(l, s, t, n, i)  __acq(l, s, t, 0, 2, n, i)
+#  define lockdep_acquire_read(l, s, t, i)     __acq(l, s, t, 1, 2, NULL, i)
+#  define lockdep_release(l, n, i)             __rel(l, n, i)
+# else
+#  define lockdep_acquire(l, s, t, i)          __acq(l, s, t, 0, 1, NULL, i)
+#  define lockdep_acquire_nest(l, s, t, n, i)  __acq(l, s, t, 0, 1, n, i)
+#  define lockdep_acquire_read(l, s, t, i)     __acq(l, s, t, 1, 1, NULL, i)
+#  define lockdep_release(l, n, i)             __rel(l, n, i)
+# endif
+#else
+# define lockdep_acquire(l, s, t, i)           do { } while (0)
+# define lockdep_acquire_nest(l, s, t, n, i)   do { } while (0)
+# define lockdep_acquire_read(l, s, t, i)      do { } while (0)
+# define lockdep_release(l, n, i)              do { } while (0)
+#endif
+
+#ifdef CONFIG_LOCK_STAT
+# define lock_stat(_lock, stat)                lock_##stat(&(_lock)->dep_map, _RET_IP_)
+#else
+# define lock_stat(_lock, stat)                do { } while (0)
+#endif
+
+
+#if BITS_PER_LONG == 64
+# define LDSEM_ACTIVE_MASK     0xffffffffL
+#else
+# define LDSEM_ACTIVE_MASK     0x0000ffffL
+#endif
+
+#define LDSEM_UNLOCKED         0L
+#define LDSEM_ACTIVE_BIAS      1L
+#define LDSEM_WAIT_BIAS                (-LDSEM_ACTIVE_MASK-1)
+#define LDSEM_READ_BIAS                LDSEM_ACTIVE_BIAS
+#define LDSEM_WRITE_BIAS       (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
+
+struct ldsem_waiter {
+       struct list_head list;
+       struct task_struct *task;
+};
+
+static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
+{
+       return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
+}
+
+static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
+{
+       long tmp = *old;
+       *old = atomic_long_cmpxchg(&sem->count, *old, new);
+       return *old == tmp;
+}
+
+/*
+ * Initialize an ldsem:
+ */
+void __init_ldsem(struct ld_semaphore *sem, const char *name,
+                 struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       /*
+        * Make sure we are not reinitializing a held semaphore:
+        */
+       debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+       lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
+       sem->count = LDSEM_UNLOCKED;
+       sem->wait_readers = 0;
+       raw_spin_lock_init(&sem->wait_lock);
+       INIT_LIST_HEAD(&sem->read_wait);
+       INIT_LIST_HEAD(&sem->write_wait);
+}
+
+static void __ldsem_wake_readers(struct ld_semaphore *sem)
+{
+       struct ldsem_waiter *waiter, *next;
+       struct task_struct *tsk;
+       long adjust, count;
+
+       /* Try to grant read locks to all readers on the read wait list.
+        * Note the 'active part' of the count is incremented by
+        * the number of readers before waking any processes up.
+        */
+       adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
+       count = ldsem_atomic_update(adjust, sem);
+       do {
+               if (count > 0)
+                       break;
+               if (ldsem_cmpxchg(&count, count - adjust, sem))
+                       return;
+       } while (1);
+
+       list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
+               tsk = waiter->task;
+               smp_mb();
+               waiter->task = NULL;
+               wake_up_process(tsk);
+               put_task_struct(tsk);
+       }
+       INIT_LIST_HEAD(&sem->read_wait);
+       sem->wait_readers = 0;
+}
+
+static inline int writer_trylock(struct ld_semaphore *sem)
+{
+       /* only wake this writer if the active part of the count can be
+        * transitioned from 0 -> 1
+        */
+       long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
+       do {
+               if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
+                       return 1;
+               if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
+                       return 0;
+       } while (1);
+}
+
+static void __ldsem_wake_writer(struct ld_semaphore *sem)
+{
+       struct ldsem_waiter *waiter;
+
+       waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
+       wake_up_process(waiter->task);
+}
+
+/*
+ * handle the lock release when processes blocked on it can now run
+ * - if we come here from up_xxxx(), then:
+ *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
+ *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
+ * - the spinlock must be held by the caller
+ * - woken process blocks are discarded from the list after having task zeroed
+ */
+static void __ldsem_wake(struct ld_semaphore *sem)
+{
+       if (!list_empty(&sem->write_wait))
+               __ldsem_wake_writer(sem);
+       else if (!list_empty(&sem->read_wait))
+               __ldsem_wake_readers(sem);
+}
+
+static void ldsem_wake(struct ld_semaphore *sem)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&sem->wait_lock, flags);
+       __ldsem_wake(sem);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+}
+
+/*
+ * wait for the read lock to be granted
+ */
+static struct ld_semaphore __sched *
+down_read_failed(struct ld_semaphore *sem, long count, long timeout)
+{
+       struct ldsem_waiter waiter;
+       struct task_struct *tsk = current;
+       long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;
+
+       /* set up my own style of waitqueue */
+       raw_spin_lock_irq(&sem->wait_lock);
+
+       /* Try to reverse the lock attempt but if the count has changed
+        * so that reversing fails, check if there are no waiters,
+        * and early-out if so */
+       do {
+               if (ldsem_cmpxchg(&count, count + adjust, sem))
+                       break;
+               if (count > 0) {
+                       raw_spin_unlock_irq(&sem->wait_lock);
+                       return sem;
+               }
+       } while (1);
+
+       list_add_tail(&waiter.list, &sem->read_wait);
+       sem->wait_readers++;
+
+       waiter.task = tsk;
+       get_task_struct(tsk);
+
+       /* if there are no active locks, wake the new lock owner(s) */
+       if ((count & LDSEM_ACTIVE_MASK) == 0)
+               __ldsem_wake(sem);
+
+       raw_spin_unlock_irq(&sem->wait_lock);
+
+       /* wait to be given the lock */
+       for (;;) {
+               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+
+               if (!waiter.task)
+                       break;
+               if (!timeout)
+                       break;
+               timeout = schedule_timeout(timeout);
+       }
+
+       __set_task_state(tsk, TASK_RUNNING);
+
+       if (!timeout) {
+               /* lock timed out but check if this task was just
+                * granted lock ownership - if so, pretend there
+                * was no timeout; otherwise, clean up the lock wait */
+               raw_spin_lock_irq(&sem->wait_lock);
+               if (waiter.task) {
+                       ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+                       list_del(&waiter.list);
+                       raw_spin_unlock_irq(&sem->wait_lock);
+                       put_task_struct(waiter.task);
+                       return NULL;
+               }
+               raw_spin_unlock_irq(&sem->wait_lock);
+       }
+
+       return sem;
+}
+
+/*
+ * wait for the write lock to be granted
+ */
+static struct ld_semaphore __sched *
+down_write_failed(struct ld_semaphore *sem, long count, long timeout)
+{
+       struct ldsem_waiter waiter;
+       struct task_struct *tsk = current;
+       long adjust = -LDSEM_ACTIVE_BIAS;
+       int locked = 0;
+
+       /* set up my own style of waitqueue */
+       raw_spin_lock_irq(&sem->wait_lock);
+
+       /* Try to reverse the lock attempt but if the count has changed
+        * so that reversing fails, check if the lock is now owned,
+        * and early-out if so */
+       do {
+               if (ldsem_cmpxchg(&count, count + adjust, sem))
+                       break;
+               if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
+                       raw_spin_unlock_irq(&sem->wait_lock);
+                       return sem;
+               }
+       } while (1);
+
+       list_add_tail(&waiter.list, &sem->write_wait);
+
+       waiter.task = tsk;
+
+       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+       for (;;) {
+               if (!timeout)
+                       break;
+               raw_spin_unlock_irq(&sem->wait_lock);
+               timeout = schedule_timeout(timeout);
+               raw_spin_lock_irq(&sem->wait_lock);
+               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+               if ((locked = writer_trylock(sem)))
+                       break;
+       }
+
+       if (!locked)
+               ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+       list_del(&waiter.list);
+       raw_spin_unlock_irq(&sem->wait_lock);
+
+       __set_task_state(tsk, TASK_RUNNING);
+
+       /* lock wait may have timed out */
+       if (!locked)
+               return NULL;
+       return sem;
+}
+
+
+
+static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
+                                          int subclass, long timeout)
+{
+       long count;
+
+       lockdep_acquire_read(sem, subclass, 0, _RET_IP_);
+
+       count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
+       if (count <= 0) {
+               lock_stat(sem, contended);
+               if (!down_read_failed(sem, count, timeout)) {
+                       lockdep_release(sem, 1, _RET_IP_);
+                       return 0;
+               }
+       }
+       lock_stat(sem, acquired);
+       return 1;
+}
+
+static inline int __ldsem_down_write_nested(struct ld_semaphore *sem,
+                                           int subclass, long timeout)
+{
+       long count;
+
+       lockdep_acquire(sem, subclass, 0, _RET_IP_);
+
+       count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
+       if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
+               lock_stat(sem, contended);
+               if (!down_write_failed(sem, count, timeout)) {
+                       lockdep_release(sem, 1, _RET_IP_);
+                       return 0;
+               }
+       }
+       lock_stat(sem, acquired);
+       return 1;
+}
+
+
+/*
+ * lock for reading -- returns 1 if successful, 0 if timed out
+ */
+int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
+{
+       might_sleep();
+       return __ldsem_down_read_nested(sem, 0, timeout);
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+int ldsem_down_read_trylock(struct ld_semaphore *sem)
+{
+       long count = sem->count;
+
+       while (count >= 0) {
+               if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
+                       lockdep_acquire_read(sem, 0, 1, _RET_IP_);
+                       lock_stat(sem, acquired);
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * lock for writing -- returns 1 if successful, 0 if timed out
+ */
+int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
+{
+       might_sleep();
+       return __ldsem_down_write_nested(sem, 0, timeout);
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+int ldsem_down_write_trylock(struct ld_semaphore *sem)
+{
+       long count = sem->count;
+
+       while ((count & LDSEM_ACTIVE_MASK) == 0) {
+               if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
+                       lockdep_acquire(sem, 0, 1, _RET_IP_);
+                       lock_stat(sem, acquired);
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * release a read lock
+ */
+void ldsem_up_read(struct ld_semaphore *sem)
+{
+       long count;
+
+       lockdep_release(sem, 1, _RET_IP_);
+
+       count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
+       if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
+               ldsem_wake(sem);
+}
+
+/*
+ * release a write lock
+ */
+void ldsem_up_write(struct ld_semaphore *sem)
+{
+       long count;
+
+       lockdep_release(sem, 1, _RET_IP_);
+
+       count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
+       if (count < 0)
+               ldsem_wake(sem);
+}
+
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
+{
+       might_sleep();
+       return __ldsem_down_read_nested(sem, subclass, timeout);
+}
+
+int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
+                           long timeout)
+{
+       might_sleep();
+       return __ldsem_down_write_nested(sem, subclass, timeout);
+}
+
+#endif
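
tty_ldsem.c above implements the new line-discipline rw-semaphore: the lower half of count holds the active-lock count, the upper half a wait count, writers take priority, and both lock paths accept a timeout. On a 64-bit build LDSEM_ACTIVE_MASK is 0xffffffff, so an uncontended write leaves (count & LDSEM_ACTIVE_MASK) == 1, which is exactly what __ldsem_down_write_nested checks. A hedged usage sketch of the read side (the caller and the five-second timeout are invented; ldsem_down_read/ldsem_up_read are the functions defined above, assumed to be declared via linux/tty.h in this series):

        #include <linux/errno.h>
        #include <linux/jiffies.h>
        #include <linux/tty.h>

        /* Sketch: take the ldisc read lock with a timeout, as a tty path might. */
        static int example_ref_ldisc(struct ld_semaphore *sem)
        {
                /* ldsem_down_read() returns 1 on success, 0 if the wait timed out. */
                if (!ldsem_down_read(sem, 5 * HZ))
                        return -EBUSY;

                /* ... use the line discipline under the read lock ... */

                ldsem_up_read(sem);
                return 0;
        }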
index 740202d8a5c4b732ab9c98b188c995d3a0f78587..c677829baa8b0d54adfefe1a9276c62ed9f3f03d 100644 (file)
@@ -3086,17 +3086,6 @@ err:
 };
 
 
-static int bind_con_driver(const struct consw *csw, int first, int last,
-                          int deflt)
-{
-       int ret;
-
-       console_lock();
-       ret = do_bind_con_driver(csw, first, last, deflt);
-       console_unlock();
-       return ret;
-}
-
 #ifdef CONFIG_VT_HW_CONSOLE_BINDING
 static int con_is_graphics(const struct consw *csw, int first, int last)
 {
@@ -3114,34 +3103,6 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
        return retval;
 }
 
-/**
- * unbind_con_driver - unbind a console driver
- * @csw: pointer to console driver to unregister
- * @first: first in range of consoles that @csw should be unbound from
- * @last: last in range of consoles that @csw should be unbound from
- * @deflt: should next bound console driver be default after @csw is unbound?
- *
- * To unbind a driver from all possible consoles, pass 0 as @first and
- * %MAX_NR_CONSOLES as @last.
- *
- * @deflt controls whether the console that ends up replacing @csw should be
- * the default console.
- *
- * RETURNS:
- * -ENODEV if @csw isn't a registered console driver or can't be unregistered
- * or 0 on success.
- */
-int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
-{
-       int retval;
-
-       console_lock();
-       retval = do_unbind_con_driver(csw, first, last, deflt);
-       console_unlock();
-       return retval;
-}
-EXPORT_SYMBOL(unbind_con_driver);
-
 /* unlocked version of unbind_con_driver() */
 int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
 {
@@ -3262,8 +3223,11 @@ static int vt_bind(struct con_driver *con)
                if (first == 0 && last == MAX_NR_CONSOLES -1)
                        deflt = 1;
 
-               if (first != -1)
-                       bind_con_driver(csw, first, last, deflt);
+               if (first != -1) {
+                       console_lock();
+                       do_bind_con_driver(csw, first, last, deflt);
+                       console_unlock();
+               }
 
                first = -1;
                last = -1;
@@ -3301,8 +3265,11 @@ static int vt_unbind(struct con_driver *con)
                if (first == 0 && last == MAX_NR_CONSOLES -1)
                        deflt = 1;
 
-               if (first != -1)
-                       unbind_con_driver(csw, first, last, deflt);
+               if (first != -1) {
+                       console_lock();
+                       do_unbind_con_driver(csw, first, last, deflt);
+                       console_unlock();
+               }
 
                first = -1;
                last = -1;
@@ -3574,29 +3541,9 @@ err:
        return retval;
 }
 
-/**
- * register_con_driver - register console driver to console layer
- * @csw: console driver
- * @first: the first console to take over, minimum value is 0
- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
- *
- * DESCRIPTION: This function registers a console driver which can later
- * bind to a range of consoles specified by @first and @last. It will
- * also initialize the console driver by calling con_startup().
- */
-int register_con_driver(const struct consw *csw, int first, int last)
-{
-       int retval;
-
-       console_lock();
-       retval = do_register_con_driver(csw, first, last);
-       console_unlock();
-       return retval;
-}
-EXPORT_SYMBOL(register_con_driver);
 
 /**
- * unregister_con_driver - unregister console driver from console layer
+ * do_unregister_con_driver - unregister console driver from console layer
  * @csw: console driver
  *
  * DESCRIPTION: All drivers that registers to the console layer must
@@ -3606,17 +3553,6 @@ EXPORT_SYMBOL(register_con_driver);
  *
  * The driver must unbind first prior to unregistration.
  */
-int unregister_con_driver(const struct consw *csw)
-{
-       int retval;
-
-       console_lock();
-       retval = do_unregister_con_driver(csw);
-       console_unlock();
-       return retval;
-}
-EXPORT_SYMBOL(unregister_con_driver);
-
 int do_unregister_con_driver(const struct consw *csw)
 {
        int i, retval = -ENODEV;
@@ -3654,7 +3590,7 @@ EXPORT_SYMBOL_GPL(do_unregister_con_driver);
  *     when a driver wants to take over some existing consoles
  *     and become default driver for newly opened ones.
  *
- *     take_over_console is basically a register followed by unbind
+ *     do_take_over_console is basically a register followed by unbind
  */
 int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
 {
@@ -3675,30 +3611,6 @@ int do_take_over_console(const struct consw *csw, int first, int last, int deflt
 }
 EXPORT_SYMBOL_GPL(do_take_over_console);
 
-/*
- *     If we support more console drivers, this function is used
- *     when a driver wants to take over some existing consoles
- *     and become default driver for newly opened ones.
- *
- *     take_over_console is basically a register followed by unbind
- */
-int take_over_console(const struct consw *csw, int first, int last, int deflt)
-{
-       int err;
-
-       err = register_con_driver(csw, first, last);
-       /*
-        * If we get an busy error we still want to bind the console driver
-        * and return success, as we may have unbound the console driver
-        * but not unregistered it.
-        */
-       if (err == -EBUSY)
-               err = 0;
-       if (!err)
-               bind_con_driver(csw, first, last, deflt);
-
-       return err;
-}
 
 /*
  * give_up_console is a wrapper to unregister_con_driver. It will only
@@ -3706,7 +3618,9 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
  */
 void give_up_console(const struct consw *csw)
 {
-       unregister_con_driver(csw);
+       console_lock();
+       do_unregister_con_driver(csw);
+       console_unlock();
 }
 
 static int __init vtconsole_class_init(void)
@@ -4262,6 +4176,5 @@ EXPORT_SYMBOL(console_blanked);
 EXPORT_SYMBOL(vc_cons);
 EXPORT_SYMBOL(global_cursor_default);
 #ifndef VT_SINGLE_DRIVER
-EXPORT_SYMBOL(take_over_console);
 EXPORT_SYMBOL(give_up_console);
 #endif
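
With the exported locking wrappers gone, console drivers outside the VT core have to take console_lock themselves around the do_* variants, the same way vt_bind()/vt_unbind() and give_up_console() now do in the hunks above. A minimal sketch of such a caller (the function name is invented; it assumes <linux/console.h> and <linux/vt_kern.h>):

static int example_unbind_all_consoles(const struct consw *csw)
{
        int ret;

        console_lock();
        /* first=0, last=MAX_NR_CONSOLES unbinds from every console;
         * deflt=1 lets the replacement driver become the default console */
        ret = do_unbind_con_driver(csw, 0, MAX_NR_CONSOLES, 1);
        console_unlock();
        return ret;
}
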
index fc2c06c66e89d5e7536c5bb5387bc0fd9a837086..2bd78e2ac8ecba07dbf42788b6c61506ee8b2b20 100644 (file)
@@ -289,13 +289,10 @@ static int vt_disallocate(unsigned int vc_num)
        struct vc_data *vc = NULL;
        int ret = 0;
 
-       if (!vc_num)
-               return 0;
-
        console_lock();
        if (VT_BUSY(vc_num))
                ret = -EBUSY;
-       else
+       else if (vc_num)
                vc = vc_deallocate(vc_num);
        console_unlock();
 
index 92e1dc94ecc87d2bd11cc8033a7bf4acc6a088a5..73f62caa86097c0e735299589808a5c2be8faad9 100644 (file)
@@ -2,59 +2,15 @@
 # USB device configuration
 #
 
-# many non-PCI SOC chips embed OHCI
+# These are unused now, remove them once they are no longer selected
 config USB_ARCH_HAS_OHCI
-       boolean
-       # ARM:
-       default y if SA1111
-       default y if ARCH_OMAP
-       default y if ARCH_S3C24XX
-       default y if PXA27x
-       default y if PXA3xx
-       default y if ARCH_EP93XX
-       default y if ARCH_AT91
-       default y if MFD_TC6393XB
-       default y if ARCH_W90X900
-       default y if ARCH_DAVINCI_DA8XX
-       default y if ARCH_CNS3XXX
-       default y if PLAT_SPEAR
-       default y if ARCH_EXYNOS
-       # PPC:
-       default y if STB03xxx
-       default y if PPC_MPC52xx
-       # MIPS:
-       default y if MIPS_ALCHEMY
-       default y if MACH_JZ4740
-       # more:
-       default PCI
-
-# some non-PCI hcds implement EHCI
+       bool
+
 config USB_ARCH_HAS_EHCI
-       boolean
-       default y if FSL_SOC
-       default y if PPC_MPC512x
-       default y if ARCH_IXP4XX
-       default y if ARCH_W90X900
-       default y if ARCH_AT91
-       default y if ARCH_MXC
-       default y if ARCH_MXS
-       default y if ARCH_OMAP3
-       default y if ARCH_CNS3XXX
-       default y if ARCH_VT8500
-       default y if PLAT_SPEAR
-       default y if PLAT_S5P
-       default y if ARCH_MSM
-       default y if MICROBLAZE
-       default y if SPARC_LEON
-       default y if ARCH_MMP
-       default y if MACH_LOONGSON1
-       default y if PLAT_ORION
-       default PCI
-
-# some non-PCI HCDs implement xHCI
+       bool
+
 config USB_ARCH_HAS_XHCI
-       boolean
-       default PCI
+       bool
 
 menuconfig USB_SUPPORT
        bool "USB support"
@@ -71,19 +27,8 @@ config USB_COMMON
        default y
        depends on USB || USB_GADGET
 
-# Host-side USB depends on having a host controller
-# NOTE:  dummy_hcd is always an option, but it's ignored here ...
-# NOTE:  SL-811 option should be board-specific ...
 config USB_ARCH_HAS_HCD
-       boolean
-       default y if USB_ARCH_HAS_OHCI
-       default y if USB_ARCH_HAS_EHCI
-       default y if USB_ARCH_HAS_XHCI
-       default y if PCMCIA && !M32R                    # sl811_cs
-       default y if ARM                                # SL-811
-       default y if BLACKFIN                           # SL-811
-       default y if SUPERH                             # r8a66597-hcd
-       default PCI
+       def_bool y
 
 # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
 config USB
index c41feba8d5c0737c926aa6a576f50b0c290a626a..238c5d47cadb47d469d871ad6cf0e86dc0675c8f 100644 (file)
@@ -25,6 +25,7 @@ obj-$(CONFIG_USB_HWA_HCD)     += host/
 obj-$(CONFIG_USB_ISP1760_HCD)  += host/
 obj-$(CONFIG_USB_IMX21_HCD)    += host/
 obj-$(CONFIG_USB_FSL_MPH_DR_OF)        += host/
+obj-$(CONFIG_USB_FUSBH200_HCD) += host/
 
 obj-$(CONFIG_USB_C67X00_HCD)   += c67x00/
 
index b2df442eb3e5b9537772569cfd321e32e5ef2ce5..eb2aa2e5a842f48eae096d29e279572982e215b8 100644 (file)
@@ -12,15 +12,15 @@ if USB_CHIPIDEA
 
 config USB_CHIPIDEA_UDC
        bool "ChipIdea device controller"
-       depends on USB_GADGET=y || USB_GADGET=USB_CHIPIDEA
+       depends on USB_GADGET=y || USB_CHIPIDEA=m
        help
          Say Y here to enable device controller functionality of the
          ChipIdea driver.
 
 config USB_CHIPIDEA_HOST
        bool "ChipIdea host controller"
-       depends on USB=y || USB=USB_CHIPIDEA
-       depends on USB_EHCI_HCD=y
+       depends on USB=y
+       depends on USB_EHCI_HCD=y || USB_CHIPIDEA=m
        select USB_EHCI_ROOT_HUB_TT
        help
          Say Y here to enable host controller functionality of the
index 4ab83e98219b7b9c15ebcc78ca2716b650653574..3bbbcba03815de28a5cb78e03aabba1044d9932d 100644 (file)
@@ -9,13 +9,13 @@ ci_hdrc-$(CONFIG_USB_CHIPIDEA_DEBUG)  += debug.o
 
 # Glue/Bridge layers go here
 
-obj-$(CONFIG_USB_CHIPIDEA)     += ci13xxx_msm.o
+obj-$(CONFIG_USB_CHIPIDEA)     += ci_hdrc_msm.o
 
 # PCI doesn't provide stubs, need to check
 ifneq ($(CONFIG_PCI),)
-       obj-$(CONFIG_USB_CHIPIDEA)      += ci13xxx_pci.o
+       obj-$(CONFIG_USB_CHIPIDEA)      += ci_hdrc_pci.o
 endif
 
 ifneq ($(CONFIG_OF_DEVICE),)
-       obj-$(CONFIG_USB_CHIPIDEA)      += ci13xxx_imx.o usbmisc_imx.o
+       obj-$(CONFIG_USB_CHIPIDEA)      += ci_hdrc_imx.o usbmisc_imx.o
 endif
index 050de8562a04b482da6a2cfb3d707609aff6b66d..aefa0261220c048322a62059ddc35f9df047e7bb 100644 (file)
 #define PORTSC_SUSP           BIT(7)
 #define PORTSC_HSP            BIT(9)
 #define PORTSC_PTC            (0x0FUL << 16)
+/* PTS and PTW for non lpm version only */
+#define PORTSC_PTS(d)                                          \
+       ((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0))
+#define PORTSC_PTW            BIT(28)
+#define PORTSC_STS            BIT(29)
 
 /* DEVLC */
 #define DEVLC_PSPD            (0x03UL << 25)
-#define    DEVLC_PSPD_HS      (0x02UL << 25)
+#define DEVLC_PSPD_HS         (0x02UL << 25)
+#define DEVLC_PTW             BIT(27)
+#define DEVLC_STS             BIT(28)
+#define DEVLC_PTS(d)          (((d) & 0x7) << 29)
+
+/* Encoding for DEVLC_PTS and PORTSC_PTS */
+#define PTS_UTMI              0
+#define PTS_ULPI              2
+#define PTS_SERIAL            3
+#define PTS_HSIC              4
 
 /* OTGSC */
 #define OTGSC_IDPU           BIT(5)
index b0a6bce064ca9a4f6ed7e4b19657165c48821686..33cb29f36e0610ff2164c137d6f46c45d8917108 100644 (file)
  * DEFINE
  *****************************************************************************/
 #define TD_PAGE_COUNT      5
-#define CI13XXX_PAGE_SIZE  4096ul /* page size for TD's */
+#define CI_HDRC_PAGE_SIZE  4096ul /* page size for TD's */
 #define ENDPT_MAX          32
 
 /******************************************************************************
  * STRUCTURES
  *****************************************************************************/
 /**
- * struct ci13xxx_ep - endpoint representation
+ * struct ci_hw_ep - endpoint representation
  * @ep: endpoint structure for gadget drivers
  * @dir: endpoint direction (TX/RX)
  * @num: endpoint number
@@ -41,7 +41,7 @@
  * @lock: pointer to controller's spinlock
  * @td_pool: pointer to controller's TD pool
  */
-struct ci13xxx_ep {
+struct ci_hw_ep {
        struct usb_ep                           ep;
        u8                                      dir;
        u8                                      num;
@@ -49,15 +49,16 @@ struct ci13xxx_ep {
        char                                    name[16];
        struct {
                struct list_head        queue;
-               struct ci13xxx_qh       *ptr;
+               struct ci_hw_qh         *ptr;
                dma_addr_t              dma;
        }                                       qh;
        int                                     wedge;
 
        /* global resources */
-       struct ci13xxx                          *ci;
+       struct ci_hdrc                          *ci;
        spinlock_t                              *lock;
        struct dma_pool                         *td_pool;
+       struct td_node                          *pending_td;
 };
 
 enum ci_role {
@@ -74,9 +75,9 @@ enum ci_role {
  * name: role name string (host/gadget)
  */
 struct ci_role_driver {
-       int             (*start)(struct ci13xxx *);
-       void            (*stop)(struct ci13xxx *);
-       irqreturn_t     (*irq)(struct ci13xxx *);
+       int             (*start)(struct ci_hdrc *);
+       void            (*stop)(struct ci_hdrc *);
+       irqreturn_t     (*irq)(struct ci_hdrc *);
        const char      *name;
 };
 
@@ -101,7 +102,7 @@ struct hw_bank {
 };
 
 /**
- * struct ci13xxx - chipidea device representation
+ * struct ci_hdrc - chipidea device representation
  * @dev: pointer to parent device
  * @lock: access synchronization
  * @hw_bank: hardware register mapping
@@ -116,7 +117,7 @@ struct hw_bank {
  * @gadget: device side representation for peripheral controller
  * @driver: gadget driver
  * @hw_ep_max: total number of endpoints supported by hardware
- * @ci13xxx_ep: array of endpoints
+ * @ci_hw_ep: array of endpoints
  * @ep0_dir: ep0 direction
  * @ep0out: pointer to ep0 OUT endpoint
  * @ep0in: pointer to ep0 IN endpoint
@@ -132,7 +133,7 @@ struct hw_bank {
  * @hcd: pointer to usb_hcd for ehci host driver
  * @debugfs: root dentry for this controller in debugfs
  */
-struct ci13xxx {
+struct ci_hdrc {
        struct device                   *dev;
        spinlock_t                      lock;
        struct hw_bank                  hw_bank;
@@ -149,9 +150,9 @@ struct ci13xxx {
        struct usb_gadget               gadget;
        struct usb_gadget_driver        *driver;
        unsigned                        hw_ep_max;
-       struct ci13xxx_ep               ci13xxx_ep[ENDPT_MAX];
+       struct ci_hw_ep                 ci_hw_ep[ENDPT_MAX];
        u32                             ep0_dir;
-       struct ci13xxx_ep               *ep0out, *ep0in;
+       struct ci_hw_ep                 *ep0out, *ep0in;
 
        struct usb_request              *status;
        bool                            setaddr;
@@ -160,7 +161,7 @@ struct ci13xxx {
        u8                              suspended;
        u8                              test_mode;
 
-       struct ci13xxx_platform_data    *platdata;
+       struct ci_hdrc_platform_data    *platdata;
        int                             vbus_active;
        /* FIXME: some day, we'll not use global phy */
        bool                            global_phy;
@@ -169,13 +170,13 @@ struct ci13xxx {
        struct dentry                   *debugfs;
 };
 
-static inline struct ci_role_driver *ci_role(struct ci13xxx *ci)
+static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
 {
        BUG_ON(ci->role >= CI_ROLE_END || !ci->roles[ci->role]);
        return ci->roles[ci->role];
 }
 
-static inline int ci_role_start(struct ci13xxx *ci, enum ci_role role)
+static inline int ci_role_start(struct ci_hdrc *ci, enum ci_role role)
 {
        int ret;
 
@@ -191,7 +192,7 @@ static inline int ci_role_start(struct ci13xxx *ci, enum ci_role role)
        return ret;
 }
 
-static inline void ci_role_stop(struct ci13xxx *ci)
+static inline void ci_role_stop(struct ci_hdrc *ci)
 {
        enum ci_role role = ci->role;
 
@@ -210,7 +211,7 @@ static inline void ci_role_stop(struct ci13xxx *ci)
 #define REG_BITS   (32)
 
 /* register indices */
-enum ci13xxx_regs {
+enum ci_hw_regs {
        CAP_CAPLENGTH,
        CAP_HCCPARAMS,
        CAP_DCCPARAMS,
@@ -242,7 +243,7 @@ enum ci13xxx_regs {
  *
  * This function returns register contents
  */
-static inline u32 hw_read(struct ci13xxx *ci, enum ci13xxx_regs reg, u32 mask)
+static inline u32 hw_read(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask)
 {
        return ioread32(ci->hw_bank.regmap[reg]) & mask;
 }
@@ -253,7 +254,7 @@ static inline u32 hw_read(struct ci13xxx *ci, enum ci13xxx_regs reg, u32 mask)
  * @mask: bitfield mask
  * @data: new value
  */
-static inline void hw_write(struct ci13xxx *ci, enum ci13xxx_regs reg,
+static inline void hw_write(struct ci_hdrc *ci, enum ci_hw_regs reg,
                            u32 mask, u32 data)
 {
        if (~mask)
@@ -270,7 +271,7 @@ static inline void hw_write(struct ci13xxx *ci, enum ci13xxx_regs reg,
  *
  * This function returns register contents
  */
-static inline u32 hw_test_and_clear(struct ci13xxx *ci, enum ci13xxx_regs reg,
+static inline u32 hw_test_and_clear(struct ci_hdrc *ci, enum ci_hw_regs reg,
                                    u32 mask)
 {
        u32 val = ioread32(ci->hw_bank.regmap[reg]) & mask;
@@ -287,7 +288,7 @@ static inline u32 hw_test_and_clear(struct ci13xxx *ci, enum ci13xxx_regs reg,
  *
  * This function returns register contents
  */
-static inline u32 hw_test_and_write(struct ci13xxx *ci, enum ci13xxx_regs reg,
+static inline u32 hw_test_and_write(struct ci_hdrc *ci, enum ci_hw_regs reg,
                                    u32 mask, u32 data)
 {
        u32 val = hw_read(ci, reg, ~0);
@@ -296,10 +297,10 @@ static inline u32 hw_test_and_write(struct ci13xxx *ci, enum ci13xxx_regs reg,
        return (val & mask) >> __ffs(mask);
 }
 
-int hw_device_reset(struct ci13xxx *ci, u32 mode);
+int hw_device_reset(struct ci_hdrc *ci, u32 mode);
 
-int hw_port_test_set(struct ci13xxx *ci, u8 mode);
+int hw_port_test_set(struct ci_hdrc *ci, u8 mode);
 
-u8 hw_port_test_get(struct ci13xxx *ci);
+u8 hw_port_test_get(struct ci_hdrc *ci);
 
 #endif /* __DRIVERS_USB_CHIPIDEA_CI_H */
diff --git a/drivers/usb/chipidea/ci13xxx_imx.c b/drivers/usb/chipidea/ci13xxx_imx.c
deleted file mode 100644 (file)
index 73f9d5f..0000000
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Marek Vasut <marex@denx.de>
- * on behalf of DENX Software Engineering GmbH
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/dma-mapping.h>
-#include <linux/usb/chipidea.h>
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-#include <linux/pinctrl/consumer.h>
-
-#include "ci.h"
-#include "ci13xxx_imx.h"
-
-#define pdev_to_phy(pdev) \
-       ((struct usb_phy *)platform_get_drvdata(pdev))
-
-struct ci13xxx_imx_data {
-       struct device_node *phy_np;
-       struct usb_phy *phy;
-       struct platform_device *ci_pdev;
-       struct clk *clk;
-       struct regulator *reg_vbus;
-};
-
-static const struct usbmisc_ops *usbmisc_ops;
-
-/* Common functions shared by usbmisc drivers */
-
-int usbmisc_set_ops(const struct usbmisc_ops *ops)
-{
-       if (usbmisc_ops)
-               return -EBUSY;
-
-       usbmisc_ops = ops;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(usbmisc_set_ops);
-
-void usbmisc_unset_ops(const struct usbmisc_ops *ops)
-{
-       usbmisc_ops = NULL;
-}
-EXPORT_SYMBOL_GPL(usbmisc_unset_ops);
-
-int usbmisc_get_init_data(struct device *dev, struct usbmisc_usb_device *usbdev)
-{
-       struct device_node *np = dev->of_node;
-       struct of_phandle_args args;
-       int ret;
-
-       usbdev->dev = dev;
-
-       ret = of_parse_phandle_with_args(np, "fsl,usbmisc", "#index-cells",
-                                       0, &args);
-       if (ret) {
-               dev_err(dev, "Failed to parse property fsl,usbmisc, errno %d\n",
-                       ret);
-               memset(usbdev, 0, sizeof(*usbdev));
-               return ret;
-       }
-       usbdev->index = args.args[0];
-       of_node_put(args.np);
-
-       if (of_find_property(np, "disable-over-current", NULL))
-               usbdev->disable_oc = 1;
-
-       if (of_find_property(np, "external-vbus-divider", NULL))
-               usbdev->evdo = 1;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(usbmisc_get_init_data);
-
-/* End of common functions shared by usbmisc drivers*/
-
-static struct ci13xxx_platform_data ci13xxx_imx_platdata  = {
-       .name                   = "ci13xxx_imx",
-       .flags                  = CI13XXX_REQUIRE_TRANSCEIVER |
-                                 CI13XXX_PULLUP_ON_VBUS |
-                                 CI13XXX_DISABLE_STREAMING,
-       .capoffset              = DEF_CAPOFFSET,
-};
-
-static int ci13xxx_imx_probe(struct platform_device *pdev)
-{
-       struct ci13xxx_imx_data *data;
-       struct platform_device *plat_ci, *phy_pdev;
-       struct device_node *phy_np;
-       struct resource *res;
-       struct regulator *reg_vbus;
-       struct pinctrl *pinctrl;
-       int ret;
-
-       if (of_find_property(pdev->dev.of_node, "fsl,usbmisc", NULL)
-               && !usbmisc_ops)
-               return -EPROBE_DEFER;
-
-       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
-       if (!data) {
-               dev_err(&pdev->dev, "Failed to allocate CI13xxx-IMX data!\n");
-               return -ENOMEM;
-       }
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Can't get device resources!\n");
-               return -ENOENT;
-       }
-
-       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-       if (IS_ERR(pinctrl))
-               dev_warn(&pdev->dev, "pinctrl get/select failed, err=%ld\n",
-                       PTR_ERR(pinctrl));
-
-       data->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(data->clk)) {
-               dev_err(&pdev->dev,
-                       "Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
-               return PTR_ERR(data->clk);
-       }
-
-       ret = clk_prepare_enable(data->clk);
-       if (ret) {
-               dev_err(&pdev->dev,
-                       "Failed to prepare or enable clock, err=%d\n", ret);
-               return ret;
-       }
-
-       phy_np = of_parse_phandle(pdev->dev.of_node, "fsl,usbphy", 0);
-       if (phy_np) {
-               data->phy_np = phy_np;
-               phy_pdev = of_find_device_by_node(phy_np);
-               if (phy_pdev) {
-                       struct usb_phy *phy;
-                       phy = pdev_to_phy(phy_pdev);
-                       if (phy &&
-                           try_module_get(phy_pdev->dev.driver->owner)) {
-                               usb_phy_init(phy);
-                               data->phy = phy;
-                       }
-               }
-       }
-
-       /* we only support host now, so enable vbus here */
-       reg_vbus = devm_regulator_get(&pdev->dev, "vbus");
-       if (!IS_ERR(reg_vbus)) {
-               ret = regulator_enable(reg_vbus);
-               if (ret) {
-                       dev_err(&pdev->dev,
-                               "Failed to enable vbus regulator, err=%d\n",
-                               ret);
-                       goto put_np;
-               }
-               data->reg_vbus = reg_vbus;
-       } else {
-               reg_vbus = NULL;
-       }
-
-       ci13xxx_imx_platdata.phy = data->phy;
-
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-
-       if (usbmisc_ops && usbmisc_ops->init) {
-               ret = usbmisc_ops->init(&pdev->dev);
-               if (ret) {
-                       dev_err(&pdev->dev,
-                               "usbmisc init failed, ret=%d\n", ret);
-                       goto err;
-               }
-       }
-
-       plat_ci = ci13xxx_add_device(&pdev->dev,
-                               pdev->resource, pdev->num_resources,
-                               &ci13xxx_imx_platdata);
-       if (IS_ERR(plat_ci)) {
-               ret = PTR_ERR(plat_ci);
-               dev_err(&pdev->dev,
-                       "Can't register ci_hdrc platform device, err=%d\n",
-                       ret);
-               goto err;
-       }
-
-       if (usbmisc_ops && usbmisc_ops->post) {
-               ret = usbmisc_ops->post(&pdev->dev);
-               if (ret) {
-                       dev_err(&pdev->dev,
-                               "usbmisc post failed, ret=%d\n", ret);
-                       goto put_np;
-               }
-       }
-
-       data->ci_pdev = plat_ci;
-       platform_set_drvdata(pdev, data);
-
-       pm_runtime_no_callbacks(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
-
-       return 0;
-
-err:
-       if (reg_vbus)
-               regulator_disable(reg_vbus);
-put_np:
-       if (phy_np)
-               of_node_put(phy_np);
-       clk_disable_unprepare(data->clk);
-       return ret;
-}
-
-static int ci13xxx_imx_remove(struct platform_device *pdev)
-{
-       struct ci13xxx_imx_data *data = platform_get_drvdata(pdev);
-
-       pm_runtime_disable(&pdev->dev);
-       ci13xxx_remove_device(data->ci_pdev);
-
-       if (data->reg_vbus)
-               regulator_disable(data->reg_vbus);
-
-       if (data->phy) {
-               usb_phy_shutdown(data->phy);
-               module_put(data->phy->dev->driver->owner);
-       }
-
-       of_node_put(data->phy_np);
-
-       clk_disable_unprepare(data->clk);
-
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
-static const struct of_device_id ci13xxx_imx_dt_ids[] = {
-       { .compatible = "fsl,imx27-usb", },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, ci13xxx_imx_dt_ids);
-
-static struct platform_driver ci13xxx_imx_driver = {
-       .probe = ci13xxx_imx_probe,
-       .remove = ci13xxx_imx_remove,
-       .driver = {
-               .name = "imx_usb",
-               .owner = THIS_MODULE,
-               .of_match_table = ci13xxx_imx_dt_ids,
-        },
-};
-
-module_platform_driver(ci13xxx_imx_driver);
-
-MODULE_ALIAS("platform:imx-usb");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CI13xxx i.MX USB binding");
-MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
-MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
diff --git a/drivers/usb/chipidea/ci13xxx_imx.h b/drivers/usb/chipidea/ci13xxx_imx.h
deleted file mode 100644 (file)
index 550bfa4..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-/* Used to set SoC specific callbacks */
-struct usbmisc_ops {
-       /* It's called once when probe a usb device */
-       int (*init)(struct device *dev);
-       /* It's called once after adding a usb device */
-       int (*post)(struct device *dev);
-};
-
-struct usbmisc_usb_device {
-       struct device *dev; /* usb controller device */
-       int index;
-
-       unsigned int disable_oc:1; /* over current detect disabled */
-       unsigned int evdo:1; /* set external vbus divider option */
-};
-
-int usbmisc_set_ops(const struct usbmisc_ops *ops);
-void usbmisc_unset_ops(const struct usbmisc_ops *ops);
-int
-usbmisc_get_init_data(struct device *dev, struct usbmisc_usb_device *usbdev);
diff --git a/drivers/usb/chipidea/ci13xxx_msm.c b/drivers/usb/chipidea/ci13xxx_msm.c
deleted file mode 100644 (file)
index 7d16681..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/usb/msm_hsusb_hw.h>
-#include <linux/usb/ulpi.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/chipidea.h>
-
-#include "ci.h"
-
-#define MSM_USB_BASE   (ci->hw_bank.abs)
-
-static void ci13xxx_msm_notify_event(struct ci13xxx *ci, unsigned event)
-{
-       struct device *dev = ci->gadget.dev.parent;
-       int val;
-
-       switch (event) {
-       case CI13XXX_CONTROLLER_RESET_EVENT:
-               dev_dbg(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
-               writel(0, USB_AHBBURST);
-               writel(0, USB_AHBMODE);
-               break;
-       case CI13XXX_CONTROLLER_STOPPED_EVENT:
-               dev_dbg(dev, "CI13XXX_CONTROLLER_STOPPED_EVENT received\n");
-               /*
-                * Put the transceiver in non-driving mode. Otherwise host
-                * may not detect soft-disconnection.
-                */
-               val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
-               val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
-               val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
-               usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
-               break;
-       default:
-               dev_dbg(dev, "unknown ci13xxx event\n");
-               break;
-       }
-}
-
-static struct ci13xxx_platform_data ci13xxx_msm_platdata = {
-       .name                   = "ci13xxx_msm",
-       .flags                  = CI13XXX_REGS_SHARED |
-                                 CI13XXX_REQUIRE_TRANSCEIVER |
-                                 CI13XXX_PULLUP_ON_VBUS |
-                                 CI13XXX_DISABLE_STREAMING,
-
-       .notify_event           = ci13xxx_msm_notify_event,
-};
-
-static int ci13xxx_msm_probe(struct platform_device *pdev)
-{
-       struct platform_device *plat_ci;
-
-       dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");
-
-       plat_ci = ci13xxx_add_device(&pdev->dev,
-                               pdev->resource, pdev->num_resources,
-                               &ci13xxx_msm_platdata);
-       if (IS_ERR(plat_ci)) {
-               dev_err(&pdev->dev, "ci13xxx_add_device failed!\n");
-               return PTR_ERR(plat_ci);
-       }
-
-       platform_set_drvdata(pdev, plat_ci);
-
-       pm_runtime_no_callbacks(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
-
-       return 0;
-}
-
-static int ci13xxx_msm_remove(struct platform_device *pdev)
-{
-       struct platform_device *plat_ci = platform_get_drvdata(pdev);
-
-       pm_runtime_disable(&pdev->dev);
-       ci13xxx_remove_device(plat_ci);
-
-       return 0;
-}
-
-static struct platform_driver ci13xxx_msm_driver = {
-       .probe = ci13xxx_msm_probe,
-       .remove = ci13xxx_msm_remove,
-       .driver = { .name = "msm_hsusb", },
-};
-
-module_platform_driver(ci13xxx_msm_driver);
-
-MODULE_ALIAS("platform:msm_hsusb");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/ci13xxx_pci.c b/drivers/usb/chipidea/ci13xxx_pci.c
deleted file mode 100644 (file)
index 4e1fc61..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * ci13xxx_pci.c - MIPS USB IP core family device controller
- *
- * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
- *
- * Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/chipidea.h>
-
-/* driver name */
-#define UDC_DRIVER_NAME   "ci13xxx_pci"
-
-/******************************************************************************
- * PCI block
- *****************************************************************************/
-static struct ci13xxx_platform_data pci_platdata = {
-       .name           = UDC_DRIVER_NAME,
-       .capoffset      = DEF_CAPOFFSET,
-};
-
-static struct ci13xxx_platform_data langwell_pci_platdata = {
-       .name           = UDC_DRIVER_NAME,
-       .capoffset      = 0,
-};
-
-static struct ci13xxx_platform_data penwell_pci_platdata = {
-       .name           = UDC_DRIVER_NAME,
-       .capoffset      = 0,
-       .power_budget   = 200,
-};
-
-/**
- * ci13xxx_pci_probe: PCI probe
- * @pdev: USB device controller being probed
- * @id:   PCI hotplug ID connecting controller to UDC framework
- *
- * This function returns an error code
- * Allocates basic PCI resources for this USB device controller, and then
- * invokes the udc_probe() method to start the UDC associated with it
- */
-static int ci13xxx_pci_probe(struct pci_dev *pdev,
-                                      const struct pci_device_id *id)
-{
-       struct ci13xxx_platform_data *platdata = (void *)id->driver_data;
-       struct platform_device *plat_ci;
-       struct resource res[3];
-       int retval = 0, nres = 2;
-
-       if (!platdata) {
-               dev_err(&pdev->dev, "device doesn't provide driver data\n");
-               return -ENODEV;
-       }
-
-       retval = pci_enable_device(pdev);
-       if (retval)
-               goto done;
-
-       if (!pdev->irq) {
-               dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
-               retval = -ENODEV;
-               goto disable_device;
-       }
-
-       pci_set_power_state(pdev, PCI_D0);
-       pci_set_master(pdev);
-       pci_try_set_mwi(pdev);
-
-       memset(res, 0, sizeof(res));
-       res[0].start    = pci_resource_start(pdev, 0);
-       res[0].end      = pci_resource_end(pdev, 0);
-       res[0].flags    = IORESOURCE_MEM;
-       res[1].start    = pdev->irq;
-       res[1].flags    = IORESOURCE_IRQ;
-
-       plat_ci = ci13xxx_add_device(&pdev->dev, res, nres, platdata);
-       if (IS_ERR(plat_ci)) {
-               dev_err(&pdev->dev, "ci13xxx_add_device failed!\n");
-               retval = PTR_ERR(plat_ci);
-               goto disable_device;
-       }
-
-       pci_set_drvdata(pdev, plat_ci);
-
-       return 0;
-
- disable_device:
-       pci_disable_device(pdev);
- done:
-       return retval;
-}
-
-/**
- * ci13xxx_pci_remove: PCI remove
- * @pdev: USB Device Controller being removed
- *
- * Reverses the effect of ci13xxx_pci_probe(),
- * first invoking the udc_remove() and then releases
- * all PCI resources allocated for this USB device controller
- */
-static void ci13xxx_pci_remove(struct pci_dev *pdev)
-{
-       struct platform_device *plat_ci = pci_get_drvdata(pdev);
-
-       ci13xxx_remove_device(plat_ci);
-       pci_set_drvdata(pdev, NULL);
-       pci_disable_device(pdev);
-}
-
-/**
- * PCI device table
- * PCI device structure
- *
- * Check "pci.h" for details
- */
-static DEFINE_PCI_DEVICE_TABLE(ci13xxx_pci_id_table) = {
-       {
-               PCI_DEVICE(0x153F, 0x1004),
-               .driver_data = (kernel_ulong_t)&pci_platdata,
-       },
-       {
-               PCI_DEVICE(0x153F, 0x1006),
-               .driver_data = (kernel_ulong_t)&pci_platdata,
-       },
-       {
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0811),
-               .driver_data = (kernel_ulong_t)&langwell_pci_platdata,
-       },
-       {
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829),
-               .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
-       },
-       { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
-};
-MODULE_DEVICE_TABLE(pci, ci13xxx_pci_id_table);
-
-static struct pci_driver ci13xxx_pci_driver = {
-       .name         = UDC_DRIVER_NAME,
-       .id_table     = ci13xxx_pci_id_table,
-       .probe        = ci13xxx_pci_probe,
-       .remove       = ci13xxx_pci_remove,
-};
-
-module_pci_driver(ci13xxx_pci_driver);
-
-MODULE_AUTHOR("MIPS - David Lopo <dlopo@chipidea.mips.com>");
-MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("June 2008");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
new file mode 100644 (file)
index 0000000..14362c0
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Marek Vasut <marex@denx.de>
+ * on behalf of DENX Software Engineering GmbH
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/chipidea.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "ci.h"
+#include "ci_hdrc_imx.h"
+
+#define pdev_to_phy(pdev) \
+       ((struct usb_phy *)platform_get_drvdata(pdev))
+
+struct ci_hdrc_imx_data {
+       struct usb_phy *phy;
+       struct platform_device *ci_pdev;
+       struct clk *clk;
+       struct regulator *reg_vbus;
+};
+
+static const struct usbmisc_ops *usbmisc_ops;
+
+/* Common functions shared by usbmisc drivers */
+
+int usbmisc_set_ops(const struct usbmisc_ops *ops)
+{
+       if (usbmisc_ops)
+               return -EBUSY;
+
+       usbmisc_ops = ops;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(usbmisc_set_ops);
+
+void usbmisc_unset_ops(const struct usbmisc_ops *ops)
+{
+       usbmisc_ops = NULL;
+}
+EXPORT_SYMBOL_GPL(usbmisc_unset_ops);
+
+int usbmisc_get_init_data(struct device *dev, struct usbmisc_usb_device *usbdev)
+{
+       struct device_node *np = dev->of_node;
+       struct of_phandle_args args;
+       int ret;
+
+       usbdev->dev = dev;
+
+       ret = of_parse_phandle_with_args(np, "fsl,usbmisc", "#index-cells",
+                                       0, &args);
+       if (ret) {
+               dev_err(dev, "Failed to parse property fsl,usbmisc, errno %d\n",
+                       ret);
+               memset(usbdev, 0, sizeof(*usbdev));
+               return ret;
+       }
+       usbdev->index = args.args[0];
+       of_node_put(args.np);
+
+       if (of_find_property(np, "disable-over-current", NULL))
+               usbdev->disable_oc = 1;
+
+       if (of_find_property(np, "external-vbus-divider", NULL))
+               usbdev->evdo = 1;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(usbmisc_get_init_data);
+
+/* End of common functions shared by usbmisc drivers*/
+
+static int ci_hdrc_imx_probe(struct platform_device *pdev)
+{
+       struct ci_hdrc_imx_data *data;
+       struct ci_hdrc_platform_data pdata = {
+               .name           = "ci_hdrc_imx",
+               .capoffset      = DEF_CAPOFFSET,
+               .flags          = CI_HDRC_REQUIRE_TRANSCEIVER |
+                                 CI_HDRC_PULLUP_ON_VBUS |
+                                 CI_HDRC_DISABLE_STREAMING,
+       };
+       struct resource *res;
+       int ret;
+
+       if (of_find_property(pdev->dev.of_node, "fsl,usbmisc", NULL)
+               && !usbmisc_ops)
+               return -EPROBE_DEFER;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data) {
+               dev_err(&pdev->dev, "Failed to allocate ci_hdrc-imx data!\n");
+               return -ENOMEM;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "Can't get device resources!\n");
+               return -ENOENT;
+       }
+
+       data->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(data->clk)) {
+               dev_err(&pdev->dev,
+                       "Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
+               return PTR_ERR(data->clk);
+       }
+
+       ret = clk_prepare_enable(data->clk);
+       if (ret) {
+               dev_err(&pdev->dev,
+                       "Failed to prepare or enable clock, err=%d\n", ret);
+               return ret;
+       }
+
+       data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
+       if (!IS_ERR(data->phy)) {
+               ret = usb_phy_init(data->phy);
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to init phy: %d\n", ret);
+                       goto err_clk;
+               }
+       } else if (PTR_ERR(data->phy) == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto err_clk;
+       }
+
+       /* we only support host now, so enable vbus here */
+       data->reg_vbus = devm_regulator_get(&pdev->dev, "vbus");
+       if (!IS_ERR(data->reg_vbus)) {
+               ret = regulator_enable(data->reg_vbus);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "Failed to enable vbus regulator, err=%d\n",
+                               ret);
+                       goto err_clk;
+               }
+       } else {
+               data->reg_vbus = NULL;
+       }
+
+       pdata.phy = data->phy;
+
+       if (!pdev->dev.dma_mask)
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+       if (usbmisc_ops && usbmisc_ops->init) {
+               ret = usbmisc_ops->init(&pdev->dev);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "usbmisc init failed, ret=%d\n", ret);
+                       goto err;
+               }
+       }
+
+       data->ci_pdev = ci_hdrc_add_device(&pdev->dev,
+                               pdev->resource, pdev->num_resources,
+                               &pdata);
+       if (IS_ERR(data->ci_pdev)) {
+               ret = PTR_ERR(data->ci_pdev);
+               dev_err(&pdev->dev,
+                       "Can't register ci_hdrc platform device, err=%d\n",
+                       ret);
+               goto err;
+       }
+
+       if (usbmisc_ops && usbmisc_ops->post) {
+               ret = usbmisc_ops->post(&pdev->dev);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "usbmisc post failed, ret=%d\n", ret);
+                       goto disable_device;
+               }
+       }
+
+       platform_set_drvdata(pdev, data);
+
+       pm_runtime_no_callbacks(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       return 0;
+
+disable_device:
+       ci_hdrc_remove_device(data->ci_pdev);
+err:
+       if (data->reg_vbus)
+               regulator_disable(data->reg_vbus);
+err_clk:
+       clk_disable_unprepare(data->clk);
+       return ret;
+}
+
+static int ci_hdrc_imx_remove(struct platform_device *pdev)
+{
+       struct ci_hdrc_imx_data *data = platform_get_drvdata(pdev);
+
+       pm_runtime_disable(&pdev->dev);
+       ci_hdrc_remove_device(data->ci_pdev);
+
+       if (data->reg_vbus)
+               regulator_disable(data->reg_vbus);
+
+       if (data->phy) {
+               usb_phy_shutdown(data->phy);
+               module_put(data->phy->dev->driver->owner);
+       }
+
+       clk_disable_unprepare(data->clk);
+
+       return 0;
+}
+
+static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+       { .compatible = "fsl,imx27-usb", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+
+static struct platform_driver ci_hdrc_imx_driver = {
+       .probe = ci_hdrc_imx_probe,
+       .remove = ci_hdrc_imx_remove,
+       .driver = {
+               .name = "imx_usb",
+               .owner = THIS_MODULE,
+               .of_match_table = ci_hdrc_imx_dt_ids,
+        },
+};
+
+module_platform_driver(ci_hdrc_imx_driver);
+
+MODULE_ALIAS("platform:imx-usb");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CI HDRC i.MX USB binding");
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
new file mode 100644 (file)
index 0000000..550bfa4
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/* Used to set SoC specific callbacks */
+struct usbmisc_ops {
+       /* It's called once when probing a usb device */
+       int (*init)(struct device *dev);
+       /* It's called once after adding a usb device */
+       int (*post)(struct device *dev);
+};
+
+struct usbmisc_usb_device {
+       struct device *dev; /* usb controller device */
+       int index;
+
+       unsigned int disable_oc:1; /* over current detect disabled */
+       unsigned int evdo:1; /* set external vbus divider option */
+};
+
+int usbmisc_set_ops(const struct usbmisc_ops *ops);
+void usbmisc_unset_ops(const struct usbmisc_ops *ops);
+int
+usbmisc_get_init_data(struct device *dev, struct usbmisc_usb_device *usbdev);
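
The usbmisc_ops/usbmisc_set_ops() interface above is how a SoC-specific "usbmisc" driver injects its non-core register setup around the glue driver's probe. A minimal registration sketch (the imx_example_* names are invented; the in-tree implementation is usbmisc_imx.c):

static int imx_example_usbmisc_init(struct device *dev)
{
        /* program non-core controls (over-current polarity, external
         * vbus divider, ...) before ci_hdrc_add_device() runs */
        return 0;
}

static const struct usbmisc_ops imx_example_usbmisc_ops = {
        .init = imx_example_usbmisc_init,
};

static int __init imx_example_usbmisc_setup(void)
{
        /* returns -EBUSY if another usbmisc driver already registered */
        return usbmisc_set_ops(&imx_example_usbmisc_ops);
}
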
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
new file mode 100644 (file)
index 0000000..fb657ef
--- /dev/null
@@ -0,0 +1,100 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/usb/ulpi.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/chipidea.h>
+
+#include "ci.h"
+
+#define MSM_USB_BASE   (ci->hw_bank.abs)
+
+static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
+{
+       struct device *dev = ci->gadget.dev.parent;
+       int val;
+
+       switch (event) {
+       case CI_HDRC_CONTROLLER_RESET_EVENT:
+               dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
+               writel(0, USB_AHBBURST);
+               writel(0, USB_AHBMODE);
+               break;
+       case CI_HDRC_CONTROLLER_STOPPED_EVENT:
+               dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n");
+               /*
+                * Put the transceiver in non-driving mode. Otherwise host
+                * may not detect soft-disconnection.
+                */
+               val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL);
+               val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+               val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+               usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL);
+               break;
+       default:
+               dev_dbg(dev, "unknown ci_hdrc event\n");
+               break;
+       }
+}
+
+static struct ci_hdrc_platform_data ci_hdrc_msm_platdata = {
+       .name                   = "ci_hdrc_msm",
+       .flags                  = CI_HDRC_REGS_SHARED |
+                                 CI_HDRC_REQUIRE_TRANSCEIVER |
+                                 CI_HDRC_PULLUP_ON_VBUS |
+                                 CI_HDRC_DISABLE_STREAMING,
+
+       .notify_event           = ci_hdrc_msm_notify_event,
+};
+
+static int ci_hdrc_msm_probe(struct platform_device *pdev)
+{
+       struct platform_device *plat_ci;
+
+       dev_dbg(&pdev->dev, "ci_hdrc_msm_probe\n");
+
+       plat_ci = ci_hdrc_add_device(&pdev->dev,
+                               pdev->resource, pdev->num_resources,
+                               &ci_hdrc_msm_platdata);
+       if (IS_ERR(plat_ci)) {
+               dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
+               return PTR_ERR(plat_ci);
+       }
+
+       platform_set_drvdata(pdev, plat_ci);
+
+       pm_runtime_no_callbacks(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       return 0;
+}
+
+static int ci_hdrc_msm_remove(struct platform_device *pdev)
+{
+       struct platform_device *plat_ci = platform_get_drvdata(pdev);
+
+       pm_runtime_disable(&pdev->dev);
+       ci_hdrc_remove_device(plat_ci);
+
+       return 0;
+}
+
+static struct platform_driver ci_hdrc_msm_driver = {
+       .probe = ci_hdrc_msm_probe,
+       .remove = ci_hdrc_msm_remove,
+       .driver = { .name = "msm_hsusb", },
+};
+
+module_platform_driver(ci_hdrc_msm_driver);
+
+MODULE_ALIAS("platform:msm_hsusb");
+MODULE_ALIAS("platform:ci13xxx_msm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
new file mode 100644 (file)
index 0000000..042320a
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * ci_hdrc_pci.c - MIPS USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/chipidea.h>
+
+/* driver name */
+#define UDC_DRIVER_NAME   "ci_hdrc_pci"
+
+/******************************************************************************
+ * PCI block
+ *****************************************************************************/
+static struct ci_hdrc_platform_data pci_platdata = {
+       .name           = UDC_DRIVER_NAME,
+       .capoffset      = DEF_CAPOFFSET,
+};
+
+static struct ci_hdrc_platform_data langwell_pci_platdata = {
+       .name           = UDC_DRIVER_NAME,
+       .capoffset      = 0,
+};
+
+static struct ci_hdrc_platform_data penwell_pci_platdata = {
+       .name           = UDC_DRIVER_NAME,
+       .capoffset      = 0,
+       .power_budget   = 200,
+};
+
+/**
+ * ci_hdrc_pci_probe: PCI probe
+ * @pdev: USB device controller being probed
+ * @id:   PCI hotplug ID connecting controller to UDC framework
+ *
+ * This function returns an error code
+ * Allocates basic PCI resources for this USB device controller, and then
+ * invokes the udc_probe() method to start the UDC associated with it
+ */
+static int ci_hdrc_pci_probe(struct pci_dev *pdev,
+                                      const struct pci_device_id *id)
+{
+       struct ci_hdrc_platform_data *platdata = (void *)id->driver_data;
+       struct platform_device *plat_ci;
+       struct resource res[3];
+       int retval = 0, nres = 2;
+
+       if (!platdata) {
+               dev_err(&pdev->dev, "device doesn't provide driver data\n");
+               return -ENODEV;
+       }
+
+       retval = pcim_enable_device(pdev);
+       if (retval)
+               return retval;
+
+       if (!pdev->irq) {
+               dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
+               return -ENODEV;
+       }
+
+       pci_set_master(pdev);
+       pci_try_set_mwi(pdev);
+
+       memset(res, 0, sizeof(res));
+       res[0].start    = pci_resource_start(pdev, 0);
+       res[0].end      = pci_resource_end(pdev, 0);
+       res[0].flags    = IORESOURCE_MEM;
+       res[1].start    = pdev->irq;
+       res[1].flags    = IORESOURCE_IRQ;
+
+       plat_ci = ci_hdrc_add_device(&pdev->dev, res, nres, platdata);
+       if (IS_ERR(plat_ci)) {
+               dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
+               return PTR_ERR(plat_ci);
+       }
+
+       pci_set_drvdata(pdev, plat_ci);
+
+       return 0;
+}
+
+/**
+ * ci_hdrc_pci_remove: PCI remove
+ * @pdev: USB Device Controller being removed
+ *
+ * Reverses the effect of ci_hdrc_pci_probe(),
+ * first invoking the udc_remove() and then releases
+ * all PCI resources allocated for this USB device controller
+ */
+static void ci_hdrc_pci_remove(struct pci_dev *pdev)
+{
+       struct platform_device *plat_ci = pci_get_drvdata(pdev);
+
+       ci_hdrc_remove_device(plat_ci);
+}
+
+/**
+ * PCI device table
+ * PCI device structure
+ *
+ * Check "pci.h" for details
+ */
+static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = {
+       {
+               PCI_DEVICE(0x153F, 0x1004),
+               .driver_data = (kernel_ulong_t)&pci_platdata,
+       },
+       {
+               PCI_DEVICE(0x153F, 0x1006),
+               .driver_data = (kernel_ulong_t)&pci_platdata,
+       },
+       {
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0811),
+               .driver_data = (kernel_ulong_t)&langwell_pci_platdata,
+       },
+       {
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829),
+               .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
+       },
+       { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table);
+
+static struct pci_driver ci_hdrc_pci_driver = {
+       .name         = UDC_DRIVER_NAME,
+       .id_table     = ci_hdrc_pci_id_table,
+       .probe        = ci_hdrc_pci_probe,
+       .remove       = ci_hdrc_pci_remove,
+};
+
+module_pci_driver(ci_hdrc_pci_driver);
+
+MODULE_AUTHOR("MIPS - David Lopo <dlopo@chipidea.mips.com>");
+MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("June 2008");
+MODULE_ALIAS("platform:ci13xxx_pci");
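
Binding one more controller through this PCI driver only takes another ID-table entry whose driver_data points at a ci_hdrc_platform_data carrying the right capability-register offset. A sketch with made-up vendor/device IDs:

static struct ci_hdrc_platform_data example_pci_platdata = {
        .name           = UDC_DRIVER_NAME,
        .capoffset      = DEF_CAPOFFSET,
};

/* extra entry to add before the all-zeroes terminator in the table above */
static const struct pci_device_id example_pci_entry[] = {
        {
                PCI_DEVICE(0x1234, 0x5678),     /* hypothetical IDs */
                .driver_data = (kernel_ulong_t)&example_pci_platdata,
        },
        { 0 }
};
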
index 475c9c1146896dbefaf5ae71adbfcf86bcb5aa55..a5df24c578fc8b65b8945781a0cacb6e6bb33977 100644 (file)
@@ -43,8 +43,7 @@
  *
  * TODO List
  * - OTG
- * - Isochronous & Interrupt Traffic
- * - Handle requests which spawns into several TDs
+ * - Interrupt Traffic
  * - GET_STATUS(device) - always reports 0
  * - Gadget API (majority of optional features)
  * - Suspend & Remote Wakeup
@@ -64,6 +63,8 @@
 #include <linux/usb/gadget.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/chipidea.h>
+#include <linux/usb/of.h>
+#include <linux/phy.h>
 
 #include "ci.h"
 #include "udc.h"
@@ -116,7 +117,7 @@ static uintptr_t ci_regs_lpm[] = {
        [OP_ENDPTCTRL]          = 0x0ECUL,
 };
 
-static int hw_alloc_regmap(struct ci13xxx *ci, bool is_lpm)
+static int hw_alloc_regmap(struct ci_hdrc *ci, bool is_lpm)
 {
        int i;
 
@@ -148,7 +149,7 @@ static int hw_alloc_regmap(struct ci13xxx *ci, bool is_lpm)
  *
  * This function returns an error code
  */
-int hw_port_test_set(struct ci13xxx *ci, u8 mode)
+int hw_port_test_set(struct ci_hdrc *ci, u8 mode)
 {
        const u8 TEST_MODE_MAX = 7;
 
@@ -164,12 +165,12 @@ int hw_port_test_set(struct ci13xxx *ci, u8 mode)
  *
  * This function returns port test mode value
  */
-u8 hw_port_test_get(struct ci13xxx *ci)
+u8 hw_port_test_get(struct ci_hdrc *ci)
 {
        return hw_read(ci, OP_PORTSC, PORTSC_PTC) >> __ffs(PORTSC_PTC);
 }
 
-static int hw_device_init(struct ci13xxx *ci, void __iomem *base)
+static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
 {
        u32 reg;
 
@@ -208,13 +209,52 @@ static int hw_device_init(struct ci13xxx *ci, void __iomem *base)
        return 0;
 }
 
+static void hw_phymode_configure(struct ci_hdrc *ci)
+{
+       u32 portsc, lpm, sts = 0;
+
+       switch (ci->platdata->phy_mode) {
+       case USBPHY_INTERFACE_MODE_UTMI:
+               portsc = PORTSC_PTS(PTS_UTMI);
+               lpm = DEVLC_PTS(PTS_UTMI);
+               break;
+       case USBPHY_INTERFACE_MODE_UTMIW:
+               portsc = PORTSC_PTS(PTS_UTMI) | PORTSC_PTW;
+               lpm = DEVLC_PTS(PTS_UTMI) | DEVLC_PTW;
+               break;
+       case USBPHY_INTERFACE_MODE_ULPI:
+               portsc = PORTSC_PTS(PTS_ULPI);
+               lpm = DEVLC_PTS(PTS_ULPI);
+               break;
+       case USBPHY_INTERFACE_MODE_SERIAL:
+               portsc = PORTSC_PTS(PTS_SERIAL);
+               lpm = DEVLC_PTS(PTS_SERIAL);
+               sts = 1;
+               break;
+       case USBPHY_INTERFACE_MODE_HSIC:
+               portsc = PORTSC_PTS(PTS_HSIC);
+               lpm = DEVLC_PTS(PTS_HSIC);
+               break;
+       default:
+               return;
+       }
+
+       if (ci->hw_bank.lpm) {
+               hw_write(ci, OP_DEVLC, DEVLC_PTS(7) | DEVLC_PTW, lpm);
+               hw_write(ci, OP_DEVLC, DEVLC_STS, sts);
+       } else {
+               hw_write(ci, OP_PORTSC, PORTSC_PTS(7) | PORTSC_PTW, portsc);
+               hw_write(ci, OP_PORTSC, PORTSC_STS, sts);
+       }
+}
+
 /**
  * hw_device_reset: resets chip (execute without interruption)
  * @ci: the controller
   *
  * This function returns an error code
  */
-int hw_device_reset(struct ci13xxx *ci, u32 mode)
+int hw_device_reset(struct ci_hdrc *ci, u32 mode)
 {
        /* should flush & stop before reset */
        hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);
@@ -224,12 +264,13 @@ int hw_device_reset(struct ci13xxx *ci, u32 mode)
        while (hw_read(ci, OP_USBCMD, USBCMD_RST))
                udelay(10);             /* not RTOS friendly */
 
+       hw_phymode_configure(ci);
 
        if (ci->platdata->notify_event)
                ci->platdata->notify_event(ci,
-                       CI13XXX_CONTROLLER_RESET_EVENT);
+                       CI_HDRC_CONTROLLER_RESET_EVENT);
 
-       if (ci->platdata->flags & CI13XXX_DISABLE_STREAMING)
+       if (ci->platdata->flags & CI_HDRC_DISABLE_STREAMING)
                hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
 
        /* USBMODE should be configured step by step */
@@ -251,7 +292,7 @@ int hw_device_reset(struct ci13xxx *ci, u32 mode)
  * ci_otg_role - pick role based on ID pin state
  * @ci: the controller
  */
-static enum ci_role ci_otg_role(struct ci13xxx *ci)
+static enum ci_role ci_otg_role(struct ci_hdrc *ci)
 {
        u32 sts = hw_read(ci, OP_OTGSC, ~0);
        enum ci_role role = sts & OTGSC_ID
@@ -267,7 +308,7 @@ static enum ci_role ci_otg_role(struct ci13xxx *ci)
  */
 static void ci_role_work(struct work_struct *work)
 {
-       struct ci13xxx *ci = container_of(work, struct ci13xxx, work);
+       struct ci_hdrc *ci = container_of(work, struct ci_hdrc, work);
        enum ci_role role = ci_otg_role(ci);
 
        if (role != ci->role) {
@@ -283,7 +324,7 @@ static void ci_role_work(struct work_struct *work)
 
 static irqreturn_t ci_irq(int irq, void *data)
 {
-       struct ci13xxx *ci = data;
+       struct ci_hdrc *ci = data;
        irqreturn_t ret = IRQ_NONE;
        u32 otgsc = 0;
 
@@ -305,9 +346,9 @@ static irqreturn_t ci_irq(int irq, void *data)
 
 static DEFINE_IDA(ci_ida);
 
-struct platform_device *ci13xxx_add_device(struct device *dev,
+struct platform_device *ci_hdrc_add_device(struct device *dev,
                        struct resource *res, int nres,
-                       struct ci13xxx_platform_data *platdata)
+                       struct ci_hdrc_platform_data *platdata)
 {
        struct platform_device *pdev;
        int id, ret;
@@ -347,29 +388,33 @@ put_id:
        ida_simple_remove(&ci_ida, id);
        return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(ci13xxx_add_device);
+EXPORT_SYMBOL_GPL(ci_hdrc_add_device);
 
-void ci13xxx_remove_device(struct platform_device *pdev)
+void ci_hdrc_remove_device(struct platform_device *pdev)
 {
        int id = pdev->id;
        platform_device_unregister(pdev);
        ida_simple_remove(&ci_ida, id);
 }
-EXPORT_SYMBOL_GPL(ci13xxx_remove_device);
+EXPORT_SYMBOL_GPL(ci_hdrc_remove_device);
 
 static int ci_hdrc_probe(struct platform_device *pdev)
 {
        struct device   *dev = &pdev->dev;
-       struct ci13xxx  *ci;
+       struct ci_hdrc  *ci;
        struct resource *res;
        void __iomem    *base;
        int             ret;
+       enum usb_dr_mode dr_mode;
 
        if (!dev->platform_data) {
                dev_err(dev, "platform data missing\n");
                return -ENODEV;
        }
 
+       if (!dev->of_node && dev->parent)
+               dev->of_node = dev->parent->of_node;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
@@ -409,14 +454,28 @@ static int ci_hdrc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       if (!ci->platdata->phy_mode)
+               ci->platdata->phy_mode = of_usb_get_phy_mode(dev->of_node);
+
+       if (!ci->platdata->dr_mode)
+               ci->platdata->dr_mode = of_usb_get_dr_mode(dev->of_node);
+
+       if (ci->platdata->dr_mode == USB_DR_MODE_UNKNOWN)
+               ci->platdata->dr_mode = USB_DR_MODE_OTG;
+
+       dr_mode = ci->platdata->dr_mode;
        /* initialize role(s) before the interrupt is requested */
-       ret = ci_hdrc_host_init(ci);
-       if (ret)
-               dev_info(dev, "doesn't support host\n");
+       if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
+               ret = ci_hdrc_host_init(ci);
+               if (ret)
+                       dev_info(dev, "doesn't support host\n");
+       }
 
-       ret = ci_hdrc_gadget_init(ci);
-       if (ret)
-               dev_info(dev, "doesn't support gadget\n");
+       if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
+               ret = ci_hdrc_gadget_init(ci);
+               if (ret)
+                       dev_info(dev, "doesn't support gadget\n");
+       }
 
        if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
                dev_err(dev, "no supported roles\n");
@@ -467,7 +526,7 @@ rm_wq:
 
 static int ci_hdrc_remove(struct platform_device *pdev)
 {
-       struct ci13xxx *ci = platform_get_drvdata(pdev);
+       struct ci_hdrc *ci = platform_get_drvdata(pdev);
 
        dbg_remove_files(ci);
        flush_workqueue(ci->wq);
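
For orientation only: the hunks above rename the glue-facing API from ci13xxx_* to ci_hdrc_*. A minimal sketch of how a platform glue driver would hand its controller to the renamed core follows; the probe/remove names, the pdata contents and the error handling here are illustrative assumptions, while ci_hdrc_add_device(), ci_hdrc_remove_device(), struct ci_hdrc_platform_data and CI_HDRC_DISABLE_STREAMING are the identifiers introduced by this diff.

    /* Hypothetical glue-driver fragment, not part of this patch: shows the renamed API. */
    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/usb/chipidea.h>

    static struct platform_device *demo_ci;    /* child device created by the core */

    static struct ci_hdrc_platform_data demo_pdata = {
            .name      = "ci_hdrc_demo",                /* hypothetical name */
            .capoffset = DEF_CAPOFFSET,
            .flags     = CI_HDRC_DISABLE_STREAMING,     /* flag renamed above */
    };

    static int demo_ci_probe(struct platform_device *pdev)
    {
            /* Pass the MMIO/IRQ resources and platform data to the chipidea core,
             * which registers a "ci_hdrc" child handled by ci_hdrc_probe(). */
            demo_ci = ci_hdrc_add_device(&pdev->dev, pdev->resource,
                                         pdev->num_resources, &demo_pdata);
            if (IS_ERR(demo_ci))
                    return PTR_ERR(demo_ci);
            return 0;
    }

    static int demo_ci_remove(struct platform_device *pdev)
    {
            ci_hdrc_remove_device(demo_ci);    /* unregister child, release its id */
            return 0;
    }

The remove path mirrors the exported pair, so a glue driver never touches struct ci_hdrc directly.
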
index 36a7063a6cba04f55e1b25ea062f2896f9524dda..96d899aee473b943880972aac399178210bf1c1d 100644
@@ -18,7 +18,7 @@
  */
 static int ci_device_show(struct seq_file *s, void *data)
 {
-       struct ci13xxx *ci = s->private;
+       struct ci_hdrc *ci = s->private;
        struct usb_gadget *gadget = &ci->gadget;
 
        seq_printf(s, "speed             = %d\n", gadget->speed);
@@ -58,7 +58,7 @@ static const struct file_operations ci_device_fops = {
  */
 static int ci_port_test_show(struct seq_file *s, void *data)
 {
-       struct ci13xxx *ci = s->private;
+       struct ci_hdrc *ci = s->private;
        unsigned long flags;
        unsigned mode;
 
@@ -78,7 +78,7 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
                                  size_t count, loff_t *ppos)
 {
        struct seq_file *s = file->private_data;
-       struct ci13xxx *ci = s->private;
+       struct ci_hdrc *ci = s->private;
        unsigned long flags;
        unsigned mode;
        char buf[32];
@@ -115,7 +115,7 @@ static const struct file_operations ci_port_test_fops = {
  */
 static int ci_qheads_show(struct seq_file *s, void *data)
 {
-       struct ci13xxx *ci = s->private;
+       struct ci_hdrc *ci = s->private;
        unsigned long flags;
        unsigned i, j;
 
@@ -126,15 +126,15 @@ static int ci_qheads_show(struct seq_file *s, void *data)
 
        spin_lock_irqsave(&ci->lock, flags);
        for (i = 0; i < ci->hw_ep_max/2; i++) {
-               struct ci13xxx_ep *mEpRx = &ci->ci13xxx_ep[i];
-               struct ci13xxx_ep *mEpTx =
-                       &ci->ci13xxx_ep[i + ci->hw_ep_max/2];
+               struct ci_hw_ep *hweprx = &ci->ci_hw_ep[i];
+               struct ci_hw_ep *hweptx =
+                       &ci->ci_hw_ep[i + ci->hw_ep_max/2];
                seq_printf(s, "EP=%02i: RX=%08X TX=%08X\n",
-                          i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
-               for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++)
+                          i, (u32)hweprx->qh.dma, (u32)hweptx->qh.dma);
+               for (j = 0; j < (sizeof(struct ci_hw_qh)/sizeof(u32)); j++)
                        seq_printf(s, " %04X:    %08X    %08X\n", j,
-                                  *((u32 *)mEpRx->qh.ptr + j),
-                                  *((u32 *)mEpTx->qh.ptr + j));
+                                  *((u32 *)hweprx->qh.ptr + j),
+                                  *((u32 *)hweptx->qh.ptr + j));
        }
        spin_unlock_irqrestore(&ci->lock, flags);
 
@@ -158,11 +158,12 @@ static const struct file_operations ci_qheads_fops = {
  */
 static int ci_requests_show(struct seq_file *s, void *data)
 {
-       struct ci13xxx *ci = s->private;
+       struct ci_hdrc *ci = s->private;
        unsigned long flags;
        struct list_head   *ptr = NULL;
-       struct ci13xxx_req *req = NULL;
-       unsigned i, j, qsize = sizeof(struct ci13xxx_td)/sizeof(u32);
+       struct ci_hw_req *req = NULL;
+       struct td_node *node, *tmpnode;
+       unsigned i, j, qsize = sizeof(struct ci_hw_td)/sizeof(u32);
 
        if (ci->role != CI_ROLE_GADGET) {
                seq_printf(s, "not in gadget mode\n");
@@ -171,16 +172,20 @@ static int ci_requests_show(struct seq_file *s, void *data)
 
        spin_lock_irqsave(&ci->lock, flags);
        for (i = 0; i < ci->hw_ep_max; i++)
-               list_for_each(ptr, &ci->ci13xxx_ep[i].qh.queue) {
-                       req = list_entry(ptr, struct ci13xxx_req, queue);
-
-                       seq_printf(s, "EP=%02i: TD=%08X %s\n",
-                                  i % (ci->hw_ep_max / 2), (u32)req->dma,
-                                  ((i < ci->hw_ep_max/2) ? "RX" : "TX"));
-
-                       for (j = 0; j < qsize; j++)
-                               seq_printf(s, " %04X:    %08X\n", j,
-                                          *((u32 *)req->ptr + j));
+               list_for_each(ptr, &ci->ci_hw_ep[i].qh.queue) {
+                       req = list_entry(ptr, struct ci_hw_req, queue);
+
+                       list_for_each_entry_safe(node, tmpnode, &req->tds, td) {
+                               seq_printf(s, "EP=%02i: TD=%08X %s\n",
+                                          i % (ci->hw_ep_max / 2),
+                                          (u32)node->dma,
+                                          ((i < ci->hw_ep_max/2) ?
+                                          "RX" : "TX"));
+
+                               for (j = 0; j < qsize; j++)
+                                       seq_printf(s, " %04X:    %08X\n", j,
+                                                  *((u32 *)node->ptr + j));
+                       }
                }
        spin_unlock_irqrestore(&ci->lock, flags);
 
@@ -201,7 +206,7 @@ static const struct file_operations ci_requests_fops = {
 
 static int ci_role_show(struct seq_file *s, void *data)
 {
-       struct ci13xxx *ci = s->private;
+       struct ci_hdrc *ci = s->private;
 
        seq_printf(s, "%s\n", ci_role(ci)->name);
 
@@ -212,7 +217,7 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
                             size_t count, loff_t *ppos)
 {
        struct seq_file *s = file->private_data;
-       struct ci13xxx *ci = s->private;
+       struct ci_hdrc *ci = s->private;
        enum ci_role role;
        char buf[8];
        int ret;
@@ -254,7 +259,7 @@ static const struct file_operations ci_role_fops = {
  *
  * This function returns an error code
  */
-int dbg_create_files(struct ci13xxx *ci)
+int dbg_create_files(struct ci_hdrc *ci)
 {
        struct dentry *dent;
 
@@ -295,7 +300,7 @@ err:
  * dbg_remove_files: destroys the attribute interface
  * @ci: device
  */
-void dbg_remove_files(struct ci13xxx *ci)
+void dbg_remove_files(struct ci_hdrc *ci)
 {
        debugfs_remove_recursive(ci->debugfs);
 }
index 7ca6ca0a24a5056726c1c9af68baa3c34410c744..e16478c4a943afc96bf64c05cc6730451fff8f47 100644
 #define __DRIVERS_USB_CHIPIDEA_DEBUG_H
 
 #ifdef CONFIG_USB_CHIPIDEA_DEBUG
-int dbg_create_files(struct ci13xxx *ci);
-void dbg_remove_files(struct ci13xxx *ci);
+int dbg_create_files(struct ci_hdrc *ci);
+void dbg_remove_files(struct ci_hdrc *ci);
 #else
-static inline int dbg_create_files(struct ci13xxx *ci)
+static inline int dbg_create_files(struct ci_hdrc *ci)
 {
        return 0;
 }
 
-static inline void dbg_remove_files(struct ci13xxx *ci)
+static inline void dbg_remove_files(struct ci_hdrc *ci)
 {
 }
 #endif
index 8e9d31277c436ecce3fdd61ccb78031148a3e53e..40d0fda4f66c8f696b0d935fa464ce3632c09020 100644
 
 static struct hc_driver __read_mostly ci_ehci_hc_driver;
 
-static irqreturn_t host_irq(struct ci13xxx *ci)
+static irqreturn_t host_irq(struct ci_hdrc *ci)
 {
        return usb_hcd_irq(ci->irq, ci->hcd);
 }
 
-static int host_start(struct ci13xxx *ci)
+static int host_start(struct ci_hdrc *ci)
 {
        struct usb_hcd *hcd;
        struct ehci_hcd *ehci;
@@ -70,13 +70,13 @@ static int host_start(struct ci13xxx *ci)
        else
                ci->hcd = hcd;
 
-       if (ci->platdata->flags & CI13XXX_DISABLE_STREAMING)
+       if (ci->platdata->flags & CI_HDRC_DISABLE_STREAMING)
                hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
 
        return ret;
 }
 
-static void host_stop(struct ci13xxx *ci)
+static void host_stop(struct ci_hdrc *ci)
 {
        struct usb_hcd *hcd = ci->hcd;
 
@@ -84,7 +84,7 @@ static void host_stop(struct ci13xxx *ci)
        usb_put_hcd(hcd);
 }
 
-int ci_hdrc_host_init(struct ci13xxx *ci)
+int ci_hdrc_host_init(struct ci_hdrc *ci)
 {
        struct ci_role_driver *rdrv;
 
index 761fb1fd6d996beae13d48dd499acac5a2283fce..058875c153334db7b0807a3cb2e0926b60c065e8 100644
@@ -3,11 +3,11 @@
 
 #ifdef CONFIG_USB_CHIPIDEA_HOST
 
-int ci_hdrc_host_init(struct ci13xxx *ci);
+int ci_hdrc_host_init(struct ci_hdrc *ci);
 
 #else
 
-static inline int ci_hdrc_host_init(struct ci13xxx *ci)
+static inline int ci_hdrc_host_init(struct ci_hdrc *ci)
 {
        return -ENXIO;
 }
index b501346484aeb16ab5e92e91e9aaf2458bf04784..e475fcda1d6829213e2b6c22c5d2c0c7302c8671 100644
@@ -61,7 +61,7 @@ static inline int hw_ep_bit(int num, int dir)
        return num + (dir ? 16 : 0);
 }
 
-static inline int ep_to_bit(struct ci13xxx *ci, int n)
+static inline int ep_to_bit(struct ci_hdrc *ci, int n)
 {
        int fill = 16 - ci->hw_ep_max / 2;
 
@@ -77,7 +77,7 @@ static inline int ep_to_bit(struct ci13xxx *ci, int n)
  *
  * This function returns an error code
  */
-static int hw_device_state(struct ci13xxx *ci, u32 dma)
+static int hw_device_state(struct ci_hdrc *ci, u32 dma)
 {
        if (dma) {
                hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
@@ -97,7 +97,7 @@ static int hw_device_state(struct ci13xxx *ci, u32 dma)
  *
  * This function returns an error code
  */
-static int hw_ep_flush(struct ci13xxx *ci, int num, int dir)
+static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
 {
        int n = hw_ep_bit(num, dir);
 
@@ -118,7 +118,7 @@ static int hw_ep_flush(struct ci13xxx *ci, int num, int dir)
  *
  * This function returns an error code
  */
-static int hw_ep_disable(struct ci13xxx *ci, int num, int dir)
+static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
 {
        hw_ep_flush(ci, num, dir);
        hw_write(ci, OP_ENDPTCTRL + num,
@@ -134,7 +134,7 @@ static int hw_ep_disable(struct ci13xxx *ci, int num, int dir)
  *
  * This function returns an error code
  */
-static int hw_ep_enable(struct ci13xxx *ci, int num, int dir, int type)
+static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
 {
        u32 mask, data;
 
@@ -168,7 +168,7 @@ static int hw_ep_enable(struct ci13xxx *ci, int num, int dir, int type)
  *
  * This function returns 1 if endpoint halted
  */
-static int hw_ep_get_halt(struct ci13xxx *ci, int num, int dir)
+static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
 {
        u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
 
@@ -182,7 +182,7 @@ static int hw_ep_get_halt(struct ci13xxx *ci, int num, int dir)
  *
  * This function returns setup status
  */
-static int hw_test_and_clear_setup_status(struct ci13xxx *ci, int n)
+static int hw_test_and_clear_setup_status(struct ci_hdrc *ci, int n)
 {
        n = ep_to_bit(ci, n);
        return hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(n));
@@ -196,7 +196,7 @@ static int hw_test_and_clear_setup_status(struct ci13xxx *ci, int n)
  *
  * This function returns an error code
  */
-static int hw_ep_prime(struct ci13xxx *ci, int num, int dir, int is_ctrl)
+static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
 {
        int n = hw_ep_bit(num, dir);
 
@@ -223,13 +223,13 @@ static int hw_ep_prime(struct ci13xxx *ci, int num, int dir, int is_ctrl)
  *
  * This function returns an error code
  */
-static int hw_ep_set_halt(struct ci13xxx *ci, int num, int dir, int value)
+static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
 {
        if (value != 0 && value != 1)
                return -EINVAL;
 
        do {
-               enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
+               enum ci_hw_regs reg = OP_ENDPTCTRL + num;
                u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
                u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
 
@@ -246,7 +246,7 @@ static int hw_ep_set_halt(struct ci13xxx *ci, int num, int dir, int value)
  *
  * This function returns true if high speed port
  */
-static int hw_port_is_high_speed(struct ci13xxx *ci)
+static int hw_port_is_high_speed(struct ci_hdrc *ci)
 {
        return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
                hw_read(ci, OP_PORTSC, PORTSC_HSP);
@@ -257,7 +257,7 @@ static int hw_port_is_high_speed(struct ci13xxx *ci)
  *
  * This function returns register data
  */
-static u32 hw_read_intr_enable(struct ci13xxx *ci)
+static u32 hw_read_intr_enable(struct ci_hdrc *ci)
 {
        return hw_read(ci, OP_USBINTR, ~0);
 }
@@ -267,7 +267,7 @@ static u32 hw_read_intr_enable(struct ci13xxx *ci)
  *
  * This function returns register data
  */
-static u32 hw_read_intr_status(struct ci13xxx *ci)
+static u32 hw_read_intr_status(struct ci_hdrc *ci)
 {
        return hw_read(ci, OP_USBSTS, ~0);
 }
@@ -279,7 +279,7 @@ static u32 hw_read_intr_status(struct ci13xxx *ci)
  *
  * This function returns complete status
  */
-static int hw_test_and_clear_complete(struct ci13xxx *ci, int n)
+static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
 {
        n = ep_to_bit(ci, n);
        return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
@@ -291,7 +291,7 @@ static int hw_test_and_clear_complete(struct ci13xxx *ci, int n)
  *
  * This function returns active interrupts
  */
-static u32 hw_test_and_clear_intr_active(struct ci13xxx *ci)
+static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
 {
        u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
 
@@ -305,7 +305,7 @@ static u32 hw_test_and_clear_intr_active(struct ci13xxx *ci)
  *
  * This function returns guard value
  */
-static int hw_test_and_clear_setup_guard(struct ci13xxx *ci)
+static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
 {
        return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
 }
@@ -316,7 +316,7 @@ static int hw_test_and_clear_setup_guard(struct ci13xxx *ci)
  *
  * This function returns guard value
  */
-static int hw_test_and_set_setup_guard(struct ci13xxx *ci)
+static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
 {
        return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
 }
@@ -328,7 +328,7 @@ static int hw_test_and_set_setup_guard(struct ci13xxx *ci)
  * This function explicitly sets the address, without the "USBADRA" (advance)
  * feature, which is not supported by older versions of the controller.
  */
-static void hw_usb_set_address(struct ci13xxx *ci, u8 value)
+static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
 {
        hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
                 value << __ffs(DEVICEADDR_USBADR));
@@ -340,7 +340,7 @@ static void hw_usb_set_address(struct ci13xxx *ci, u8 value)
  *
  * This function returns an error code
  */
-static int hw_usb_reset(struct ci13xxx *ci)
+static int hw_usb_reset(struct ci_hdrc *ci)
 {
        hw_usb_set_address(ci, 0);
 
@@ -368,11 +368,60 @@ static int hw_usb_reset(struct ci13xxx *ci)
 /******************************************************************************
  * UTIL block
  *****************************************************************************/
+
+static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
+                         unsigned length)
+{
+       int i;
+       u32 temp;
+       struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
+                                                 GFP_ATOMIC);
+
+       if (node == NULL)
+               return -ENOMEM;
+
+       node->ptr = dma_pool_alloc(hwep->td_pool, GFP_ATOMIC,
+                                  &node->dma);
+       if (node->ptr == NULL) {
+               kfree(node);
+               return -ENOMEM;
+       }
+
+       memset(node->ptr, 0, sizeof(struct ci_hw_td));
+       node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
+       node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
+       node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
+
+       temp = (u32) (hwreq->req.dma + hwreq->req.actual);
+       if (length) {
+               node->ptr->page[0] = cpu_to_le32(temp);
+               for (i = 1; i < TD_PAGE_COUNT; i++) {
+                       u32 page = temp + i * CI_HDRC_PAGE_SIZE;
+                       page &= ~TD_RESERVED_MASK;
+                       node->ptr->page[i] = cpu_to_le32(page);
+               }
+       }
+
+       hwreq->req.actual += length;
+
+       if (!list_empty(&hwreq->tds)) {
+               /* get the last entry */
+               lastnode = list_entry(hwreq->tds.prev,
+                               struct td_node, td);
+               lastnode->ptr->next = cpu_to_le32(node->dma);
+       }
+
+       INIT_LIST_HEAD(&node->td);
+       list_add_tail(&node->td, &hwreq->tds);
+
+       return 0;
+}
+
 /**
  * _usb_addr: calculates endpoint address from direction & number
  * @ep:  endpoint
  */
-static inline u8 _usb_addr(struct ci13xxx_ep *ep)
+static inline u8 _usb_addr(struct ci_hw_ep *ep)
 {
        return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
 }
@@ -380,75 +429,73 @@ static inline u8 _usb_addr(struct ci13xxx_ep *ep)
 /**
  * _hardware_queue: configures a request at hardware level
  * @gadget: gadget
- * @mEp:    endpoint
+ * @hwep:   endpoint
  *
  * This function returns an error code
  */
-static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 {
-       struct ci13xxx *ci = mEp->ci;
-       unsigned i;
+       struct ci_hdrc *ci = hwep->ci;
        int ret = 0;
-       unsigned length = mReq->req.length;
+       unsigned rest = hwreq->req.length;
+       int pages = TD_PAGE_COUNT;
+       struct td_node *firstnode, *lastnode;
 
        /* don't queue twice */
-       if (mReq->req.status == -EALREADY)
+       if (hwreq->req.status == -EALREADY)
                return -EALREADY;
 
-       mReq->req.status = -EALREADY;
-
-       if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
-               mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
-                                          &mReq->zdma);
-               if (mReq->zptr == NULL)
-                       return -ENOMEM;
+       hwreq->req.status = -EALREADY;
 
-               memset(mReq->zptr, 0, sizeof(*mReq->zptr));
-               mReq->zptr->next    = cpu_to_le32(TD_TERMINATE);
-               mReq->zptr->token   = cpu_to_le32(TD_STATUS_ACTIVE);
-               if (!mReq->req.no_interrupt)
-                       mReq->zptr->token   |= cpu_to_le32(TD_IOC);
-       }
-       ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir);
+       ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir);
        if (ret)
                return ret;
 
        /*
-        * TD configuration
-        * TODO - handle requests which spawns into several TDs
+        * The first buffer might not be page aligned.
+        * In that case we have to span into one extra td.
         */
-       memset(mReq->ptr, 0, sizeof(*mReq->ptr));
-       mReq->ptr->token    = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
-       mReq->ptr->token   &= cpu_to_le32(TD_TOTAL_BYTES);
-       mReq->ptr->token   |= cpu_to_le32(TD_STATUS_ACTIVE);
-       if (mReq->zptr) {
-               mReq->ptr->next    = cpu_to_le32(mReq->zdma);
-       } else {
-               mReq->ptr->next    = cpu_to_le32(TD_TERMINATE);
-               if (!mReq->req.no_interrupt)
-                       mReq->ptr->token  |= cpu_to_le32(TD_IOC);
-       }
-       mReq->ptr->page[0]  = cpu_to_le32(mReq->req.dma);
-       for (i = 1; i < TD_PAGE_COUNT; i++) {
-               u32 page = mReq->req.dma + i * CI13XXX_PAGE_SIZE;
-               page &= ~TD_RESERVED_MASK;
-               mReq->ptr->page[i] = cpu_to_le32(page);
+       if (hwreq->req.dma % PAGE_SIZE)
+               pages--;
+
+       if (rest == 0)
+               add_td_to_list(hwep, hwreq, 0);
+
+       while (rest > 0) {
+               unsigned count = min(hwreq->req.length - hwreq->req.actual,
+                                       (unsigned)(pages * CI_HDRC_PAGE_SIZE));
+               add_td_to_list(hwep, hwreq, count);
+               rest -= count;
        }
 
+       if (hwreq->req.zero && hwreq->req.length
+           && (hwreq->req.length % hwep->ep.maxpacket == 0))
+               add_td_to_list(hwep, hwreq, 0);
+
+       firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
+
+       lastnode = list_entry(hwreq->tds.prev,
+               struct td_node, td);
+
+       lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
+       if (!hwreq->req.no_interrupt)
+               lastnode->ptr->token |= cpu_to_le32(TD_IOC);
        wmb();
 
-       if (!list_empty(&mEp->qh.queue)) {
-               struct ci13xxx_req *mReqPrev;
-               int n = hw_ep_bit(mEp->num, mEp->dir);
+       hwreq->req.actual = 0;
+       if (!list_empty(&hwep->qh.queue)) {
+               struct ci_hw_req *hwreqprev;
+               int n = hw_ep_bit(hwep->num, hwep->dir);
                int tmp_stat;
-               u32 next = mReq->dma & TD_ADDR_MASK;
-
-               mReqPrev = list_entry(mEp->qh.queue.prev,
-                               struct ci13xxx_req, queue);
-               if (mReqPrev->zptr)
-                       mReqPrev->zptr->next = cpu_to_le32(next);
-               else
-                       mReqPrev->ptr->next = cpu_to_le32(next);
+               struct td_node *prevlastnode;
+               u32 next = firstnode->dma & TD_ADDR_MASK;
+
+               hwreqprev = list_entry(hwep->qh.queue.prev,
+                               struct ci_hw_req, queue);
+               prevlastnode = list_entry(hwreqprev->tds.prev,
+                               struct td_node, td);
+
+               prevlastnode->ptr->next = cpu_to_le32(next);
                wmb();
                if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
                        goto done;
@@ -462,99 +509,152 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
        }
 
        /*  QH configuration */
-       mEp->qh.ptr->td.next   = cpu_to_le32(mReq->dma);    /* TERMINATE = 0 */
-       mEp->qh.ptr->td.token &=
+       hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
+       hwep->qh.ptr->td.token &=
                cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
 
+       if (hwep->type == USB_ENDPOINT_XFER_ISOC) {
+               u32 mul = hwreq->req.length / hwep->ep.maxpacket;
+
+               if (hwreq->req.length % hwep->ep.maxpacket)
+                       mul++;
+               hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
+       }
+
        wmb();   /* synchronize before ep prime */
 
-       ret = hw_ep_prime(ci, mEp->num, mEp->dir,
-                          mEp->type == USB_ENDPOINT_XFER_CONTROL);
+       ret = hw_ep_prime(ci, hwep->num, hwep->dir,
+                          hwep->type == USB_ENDPOINT_XFER_CONTROL);
 done:
        return ret;
 }
 
+/*
+ * free_pending_td: remove a pending request for the endpoint
+ * @hwep: endpoint
+ */
+static void free_pending_td(struct ci_hw_ep *hwep)
+{
+       struct td_node *pending = hwep->pending_td;
+
+       dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
+       hwep->pending_td = NULL;
+       kfree(pending);
+}
+
 /**
  * _hardware_dequeue: handles a request at hardware level
  * @gadget: gadget
- * @mEp:    endpoint
+ * @hwep:   endpoint
  *
  * This function returns an error code
  */
-static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 {
-       u32 tmptoken = le32_to_cpu(mReq->ptr->token);
+       u32 tmptoken;
+       struct td_node *node, *tmpnode;
+       unsigned remaining_length;
+       unsigned actual = hwreq->req.length;
 
-       if (mReq->req.status != -EALREADY)
+       if (hwreq->req.status != -EALREADY)
                return -EINVAL;
 
-       if ((TD_STATUS_ACTIVE & tmptoken) != 0)
-               return -EBUSY;
+       hwreq->req.status = 0;
 
-       if (mReq->zptr) {
-               if ((cpu_to_le32(TD_STATUS_ACTIVE) & mReq->zptr->token) != 0)
+       list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+               tmptoken = le32_to_cpu(node->ptr->token);
+               if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
+                       hwreq->req.status = -EALREADY;
                        return -EBUSY;
-               dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
-               mReq->zptr = NULL;
-       }
+               }
 
-       mReq->req.status = 0;
+               remaining_length = (tmptoken & TD_TOTAL_BYTES);
+               remaining_length >>= __ffs(TD_TOTAL_BYTES);
+               actual -= remaining_length;
+
+               hwreq->req.status = tmptoken & TD_STATUS;
+               if ((TD_STATUS_HALTED & hwreq->req.status)) {
+                       hwreq->req.status = -EPIPE;
+                       break;
+               } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
+                       hwreq->req.status = -EPROTO;
+                       break;
+               } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
+                       hwreq->req.status = -EILSEQ;
+                       break;
+               }
 
-       usb_gadget_unmap_request(&mEp->ci->gadget, &mReq->req, mEp->dir);
+               if (remaining_length) {
+                       if (hwep->dir) {
+                               hwreq->req.status = -EPROTO;
+                               break;
+                       }
+               }
+               /*
+                * As the hardware could still address the freed td
+                * which would render the udc unusable, the cleanup of the
+                * td has to be delayed by one.
+                */
+               if (hwep->pending_td)
+                       free_pending_td(hwep);
 
-       mReq->req.status = tmptoken & TD_STATUS;
-       if ((TD_STATUS_HALTED & mReq->req.status) != 0)
-               mReq->req.status = -1;
-       else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
-               mReq->req.status = -1;
-       else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
-               mReq->req.status = -1;
+               hwep->pending_td = node;
+               list_del_init(&node->td);
+       }
 
-       mReq->req.actual   = tmptoken & TD_TOTAL_BYTES;
-       mReq->req.actual >>= __ffs(TD_TOTAL_BYTES);
-       mReq->req.actual   = mReq->req.length - mReq->req.actual;
-       mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
+       usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir);
 
-       return mReq->req.actual;
+       hwreq->req.actual += actual;
+
+       if (hwreq->req.status)
+               return hwreq->req.status;
+
+       return hwreq->req.actual;
 }
 
 /**
  * _ep_nuke: dequeues all endpoint requests
- * @mEp: endpoint
+ * @hwep: endpoint
  *
  * This function returns an error code
  * Caller must hold lock
  */
-static int _ep_nuke(struct ci13xxx_ep *mEp)
-__releases(mEp->lock)
-__acquires(mEp->lock)
+static int _ep_nuke(struct ci_hw_ep *hwep)
+__releases(hwep->lock)
+__acquires(hwep->lock)
 {
-       if (mEp == NULL)
+       struct td_node *node, *tmpnode;
+       if (hwep == NULL)
                return -EINVAL;
 
-       hw_ep_flush(mEp->ci, mEp->num, mEp->dir);
+       hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
-       while (!list_empty(&mEp->qh.queue)) {
+       while (!list_empty(&hwep->qh.queue)) {
 
                /* pop oldest request */
-               struct ci13xxx_req *mReq = \
-                       list_entry(mEp->qh.queue.next,
-                                  struct ci13xxx_req, queue);
-
-               if (mReq->zptr) {
-                       dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
-                       mReq->zptr = NULL;
+               struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
+                                                    struct ci_hw_req, queue);
+
+               list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+                       dma_pool_free(hwep->td_pool, node->ptr, node->dma);
+                       list_del_init(&node->td);
+                       node->ptr = NULL;
+                       kfree(node);
                }
 
-               list_del_init(&mReq->queue);
-               mReq->req.status = -ESHUTDOWN;
+               list_del_init(&hwreq->queue);
+               hwreq->req.status = -ESHUTDOWN;
 
-               if (mReq->req.complete != NULL) {
-                       spin_unlock(mEp->lock);
-                       mReq->req.complete(&mEp->ep, &mReq->req);
-                       spin_lock(mEp->lock);
+               if (hwreq->req.complete != NULL) {
+                       spin_unlock(hwep->lock);
+                       hwreq->req.complete(&hwep->ep, &hwreq->req);
+                       spin_lock(hwep->lock);
                }
        }
+
+       if (hwep->pending_td)
+               free_pending_td(hwep);
+
        return 0;
 }
 
@@ -567,7 +667,7 @@ __acquires(mEp->lock)
 static int _gadget_stop_activity(struct usb_gadget *gadget)
 {
        struct usb_ep *ep;
-       struct ci13xxx    *ci = container_of(gadget, struct ci13xxx, gadget);
+       struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
        unsigned long flags;
 
        spin_lock_irqsave(&ci->lock, flags);
@@ -608,7 +708,7 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
  *
  * This function resets USB engine after a bus reset occurred
  */
-static void isr_reset_handler(struct ci13xxx *ci)
+static void isr_reset_handler(struct ci_hdrc *ci)
 __releases(ci->lock)
 __acquires(ci->lock)
 {
@@ -658,47 +758,48 @@ static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
 static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
                    gfp_t __maybe_unused gfp_flags)
 {
-       struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
-       struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
-       struct ci13xxx *ci = mEp->ci;
+       struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
+       struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
+       struct ci_hdrc *ci = hwep->ci;
        int retval = 0;
 
-       if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
+       if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
                return -EINVAL;
 
-       if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+       if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
                if (req->length)
-                       mEp = (ci->ep0_dir == RX) ?
+                       hwep = (ci->ep0_dir == RX) ?
                               ci->ep0out : ci->ep0in;
-               if (!list_empty(&mEp->qh.queue)) {
-                       _ep_nuke(mEp);
+               if (!list_empty(&hwep->qh.queue)) {
+                       _ep_nuke(hwep);
                        retval = -EOVERFLOW;
-                       dev_warn(mEp->ci->dev, "endpoint ctrl %X nuked\n",
-                                _usb_addr(mEp));
+                       dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
+                                _usb_addr(hwep));
                }
        }
 
-       /* first nuke then test link, e.g. previous status has not sent */
-       if (!list_empty(&mReq->queue)) {
-               dev_err(mEp->ci->dev, "request already in queue\n");
-               return -EBUSY;
+       if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
+           hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
+               dev_err(hwep->ci->dev, "request length too big for isochronous\n");
+               return -EMSGSIZE;
        }
 
-       if (req->length > (TD_PAGE_COUNT - 1) * CI13XXX_PAGE_SIZE) {
-               dev_err(mEp->ci->dev, "request bigger than one td\n");
-               return -EMSGSIZE;
+       /* first nuke then test link, e.g. previous status has not sent */
+       if (!list_empty(&hwreq->queue)) {
+               dev_err(hwep->ci->dev, "request already in queue\n");
+               return -EBUSY;
        }
 
        /* push request */
-       mReq->req.status = -EINPROGRESS;
-       mReq->req.actual = 0;
+       hwreq->req.status = -EINPROGRESS;
+       hwreq->req.actual = 0;
 
-       retval = _hardware_enqueue(mEp, mReq);
+       retval = _hardware_enqueue(hwep, hwreq);
 
        if (retval == -EALREADY)
                retval = 0;
        if (!retval)
-               list_add_tail(&mReq->queue, &mEp->qh.queue);
+               list_add_tail(&hwreq->queue, &hwep->qh.queue);
 
        return retval;
 }
@@ -710,22 +811,22 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
  *
  * This function returns an error code
  */
-static int isr_get_status_response(struct ci13xxx *ci,
+static int isr_get_status_response(struct ci_hdrc *ci,
                                   struct usb_ctrlrequest *setup)
-__releases(mEp->lock)
-__acquires(mEp->lock)
+__releases(hwep->lock)
+__acquires(hwep->lock)
 {
-       struct ci13xxx_ep *mEp = ci->ep0in;
+       struct ci_hw_ep *hwep = ci->ep0in;
        struct usb_request *req = NULL;
        gfp_t gfp_flags = GFP_ATOMIC;
        int dir, num, retval;
 
-       if (mEp == NULL || setup == NULL)
+       if (hwep == NULL || setup == NULL)
                return -EINVAL;
 
-       spin_unlock(mEp->lock);
-       req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
-       spin_lock(mEp->lock);
+       spin_unlock(hwep->lock);
+       req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
+       spin_lock(hwep->lock);
        if (req == NULL)
                return -ENOMEM;
 
@@ -750,7 +851,7 @@ __acquires(mEp->lock)
        }
        /* else do nothing; reserved for future use */
 
-       retval = _ep_queue(&mEp->ep, req, gfp_flags);
+       retval = _ep_queue(&hwep->ep, req, gfp_flags);
        if (retval)
                goto err_free_buf;
 
@@ -759,9 +860,9 @@ __acquires(mEp->lock)
  err_free_buf:
        kfree(req->buf);
  err_free_req:
-       spin_unlock(mEp->lock);
-       usb_ep_free_request(&mEp->ep, req);
-       spin_lock(mEp->lock);
+       spin_unlock(hwep->lock);
+       usb_ep_free_request(&hwep->ep, req);
+       spin_lock(hwep->lock);
        return retval;
 }
 
@@ -776,7 +877,7 @@ __acquires(mEp->lock)
 static void
 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
 {
-       struct ci13xxx *ci = req->context;
+       struct ci_hdrc *ci = req->context;
        unsigned long flags;
 
        if (ci->setaddr) {
@@ -796,48 +897,48 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
  *
  * This function returns an error code
  */
-static int isr_setup_status_phase(struct ci13xxx *ci)
+static int isr_setup_status_phase(struct ci_hdrc *ci)
 {
        int retval;
-       struct ci13xxx_ep *mEp;
+       struct ci_hw_ep *hwep;
 
-       mEp = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
+       hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
        ci->status->context = ci;
        ci->status->complete = isr_setup_status_complete;
 
-       retval = _ep_queue(&mEp->ep, ci->status, GFP_ATOMIC);
+       retval = _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
 
        return retval;
 }
 
 /**
  * isr_tr_complete_low: transaction complete low level handler
- * @mEp: endpoint
+ * @hwep: endpoint
  *
  * This function returns an error code
  * Caller must hold lock
  */
-static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
-__releases(mEp->lock)
-__acquires(mEp->lock)
+static int isr_tr_complete_low(struct ci_hw_ep *hwep)
+__releases(hwep->lock)
+__acquires(hwep->lock)
 {
-       struct ci13xxx_req *mReq, *mReqTemp;
-       struct ci13xxx_ep *mEpTemp = mEp;
+       struct ci_hw_req *hwreq, *hwreqtemp;
+       struct ci_hw_ep *hweptemp = hwep;
        int retval = 0;
 
-       list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
+       list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
                        queue) {
-               retval = _hardware_dequeue(mEp, mReq);
+               retval = _hardware_dequeue(hwep, hwreq);
                if (retval < 0)
                        break;
-               list_del_init(&mReq->queue);
-               if (mReq->req.complete != NULL) {
-                       spin_unlock(mEp->lock);
-                       if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
-                                       mReq->req.length)
-                               mEpTemp = mEp->ci->ep0in;
-                       mReq->req.complete(&mEpTemp->ep, &mReq->req);
-                       spin_lock(mEp->lock);
+               list_del_init(&hwreq->queue);
+               if (hwreq->req.complete != NULL) {
+                       spin_unlock(hwep->lock);
+                       if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
+                                       hwreq->req.length)
+                               hweptemp = hwep->ci->ep0in;
+                       hwreq->req.complete(&hweptemp->ep, &hwreq->req);
+                       spin_lock(hwep->lock);
                }
        }
 
@@ -853,7 +954,7 @@ __acquires(mEp->lock)
  *
  * This function handles traffic events
  */
-static void isr_tr_complete_handler(struct ci13xxx *ci)
+static void isr_tr_complete_handler(struct ci_hdrc *ci)
 __releases(ci->lock)
 __acquires(ci->lock)
 {
@@ -861,21 +962,21 @@ __acquires(ci->lock)
        u8 tmode = 0;
 
        for (i = 0; i < ci->hw_ep_max; i++) {
-               struct ci13xxx_ep *mEp  = &ci->ci13xxx_ep[i];
+               struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];
                int type, num, dir, err = -EINVAL;
                struct usb_ctrlrequest req;
 
-               if (mEp->ep.desc == NULL)
+               if (hwep->ep.desc == NULL)
                        continue;   /* not configured */
 
                if (hw_test_and_clear_complete(ci, i)) {
-                       err = isr_tr_complete_low(mEp);
-                       if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+                       err = isr_tr_complete_low(hwep);
+                       if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
                                if (err > 0)   /* needs status phase */
                                        err = isr_setup_status_phase(ci);
                                if (err < 0) {
                                        spin_unlock(&ci->lock);
-                                       if (usb_ep_set_halt(&mEp->ep))
+                                       if (usb_ep_set_halt(&hwep->ep))
                                                dev_err(ci->dev,
                                                        "error: ep_set_halt\n");
                                        spin_lock(&ci->lock);
@@ -883,7 +984,7 @@ __acquires(ci->lock)
                        }
                }
 
-               if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
+               if (hwep->type != USB_ENDPOINT_XFER_CONTROL ||
                    !hw_test_and_clear_setup_status(ci, i))
                        continue;
 
@@ -902,7 +1003,7 @@ __acquires(ci->lock)
                /* read_setup_packet */
                do {
                        hw_test_and_set_setup_guard(ci);
-                       memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
+                       memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
                } while (!hw_test_and_clear_setup_guard(ci));
 
                type = req.bRequestType;
@@ -921,10 +1022,10 @@ __acquires(ci->lock)
                                num &= USB_ENDPOINT_NUMBER_MASK;
                                if (dir) /* TX */
                                        num += ci->hw_ep_max/2;
-                               if (!ci->ci13xxx_ep[num].wedge) {
+                               if (!ci->ci_hw_ep[num].wedge) {
                                        spin_unlock(&ci->lock);
                                        err = usb_ep_clear_halt(
-                                               &ci->ci13xxx_ep[num].ep);
+                                               &ci->ci_hw_ep[num].ep);
                                        spin_lock(&ci->lock);
                                        if (err)
                                                break;
@@ -974,7 +1075,7 @@ __acquires(ci->lock)
                                        num += ci->hw_ep_max/2;
 
                                spin_unlock(&ci->lock);
-                               err = usb_ep_set_halt(&ci->ci13xxx_ep[num].ep);
+                               err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
                                spin_lock(&ci->lock);
                                if (!err)
                                        isr_setup_status_phase(ci);
@@ -1021,7 +1122,7 @@ delegate:
 
                if (err < 0) {
                        spin_unlock(&ci->lock);
-                       if (usb_ep_set_halt(&mEp->ep))
+                       if (usb_ep_set_halt(&hwep->ep))
                                dev_err(ci->dev, "error: ep_set_halt\n");
                        spin_lock(&ci->lock);
                }
@@ -1039,7 +1140,7 @@ delegate:
 static int ep_enable(struct usb_ep *ep,
                     const struct usb_endpoint_descriptor *desc)
 {
-       struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+       struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
        int retval = 0;
        unsigned long flags;
        u32 cap = 0;
@@ -1047,39 +1148,41 @@ static int ep_enable(struct usb_ep *ep,
        if (ep == NULL || desc == NULL)
                return -EINVAL;
 
-       spin_lock_irqsave(mEp->lock, flags);
+       spin_lock_irqsave(hwep->lock, flags);
 
        /* only internal SW should enable ctrl endpts */
 
-       mEp->ep.desc = desc;
+       hwep->ep.desc = desc;
 
-       if (!list_empty(&mEp->qh.queue))
-               dev_warn(mEp->ci->dev, "enabling a non-empty endpoint!\n");
+       if (!list_empty(&hwep->qh.queue))
+               dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
 
-       mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
-       mEp->num  = usb_endpoint_num(desc);
-       mEp->type = usb_endpoint_type(desc);
+       hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
+       hwep->num  = usb_endpoint_num(desc);
+       hwep->type = usb_endpoint_type(desc);
 
-       mEp->ep.maxpacket = usb_endpoint_maxp(desc);
+       hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
+       hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
 
-       if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+       if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
                cap |= QH_IOS;
-       if (mEp->num)
+       if (hwep->num)
                cap |= QH_ZLT;
-       cap |= (mEp->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
+       cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
 
-       mEp->qh.ptr->cap = cpu_to_le32(cap);
+       hwep->qh.ptr->cap = cpu_to_le32(cap);
 
-       mEp->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
+       hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
 
        /*
         * Enable endpoints in the HW other than ep0 as ep0
         * is always enabled
         */
-       if (mEp->num)
-               retval |= hw_ep_enable(mEp->ci, mEp->num, mEp->dir, mEp->type);
+       if (hwep->num)
+               retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
+                                      hwep->type);
 
-       spin_unlock_irqrestore(mEp->lock, flags);
+       spin_unlock_irqrestore(hwep->lock, flags);
        return retval;
 }
 
@@ -1090,32 +1193,32 @@ static int ep_enable(struct usb_ep *ep,
  */
 static int ep_disable(struct usb_ep *ep)
 {
-       struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+       struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
        int direction, retval = 0;
        unsigned long flags;
 
        if (ep == NULL)
                return -EINVAL;
-       else if (mEp->ep.desc == NULL)
+       else if (hwep->ep.desc == NULL)
                return -EBUSY;
 
-       spin_lock_irqsave(mEp->lock, flags);
+       spin_lock_irqsave(hwep->lock, flags);
 
        /* only internal SW should disable ctrl endpts */
 
-       direction = mEp->dir;
+       direction = hwep->dir;
        do {
-               retval |= _ep_nuke(mEp);
-               retval |= hw_ep_disable(mEp->ci, mEp->num, mEp->dir);
+               retval |= _ep_nuke(hwep);
+               retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
 
-               if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-                       mEp->dir = (mEp->dir == TX) ? RX : TX;
+               if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+                       hwep->dir = (hwep->dir == TX) ? RX : TX;
 
-       } while (mEp->dir != direction);
+       } while (hwep->dir != direction);
 
-       mEp->ep.desc = NULL;
+       hwep->ep.desc = NULL;
 
-       spin_unlock_irqrestore(mEp->lock, flags);
+       spin_unlock_irqrestore(hwep->lock, flags);
        return retval;
 }
 
@@ -1126,25 +1229,18 @@ static int ep_disable(struct usb_ep *ep)
  */
 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
 {
-       struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
-       struct ci13xxx_req *mReq = NULL;
+       struct ci_hw_req *hwreq = NULL;
 
        if (ep == NULL)
                return NULL;
 
-       mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
-       if (mReq != NULL) {
-               INIT_LIST_HEAD(&mReq->queue);
-
-               mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
-                                          &mReq->dma);
-               if (mReq->ptr == NULL) {
-                       kfree(mReq);
-                       mReq = NULL;
-               }
+       hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
+       if (hwreq != NULL) {
+               INIT_LIST_HEAD(&hwreq->queue);
+               INIT_LIST_HEAD(&hwreq->tds);
        }
 
-       return (mReq == NULL) ? NULL : &mReq->req;
+       return (hwreq == NULL) ? NULL : &hwreq->req;
 }
 
 /**
@@ -1154,24 +1250,30 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
  */
 static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
 {
-       struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
-       struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+       struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
+       struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
+       struct td_node *node, *tmpnode;
        unsigned long flags;
 
        if (ep == NULL || req == NULL) {
                return;
-       } else if (!list_empty(&mReq->queue)) {
-               dev_err(mEp->ci->dev, "freeing queued request\n");
+       } else if (!list_empty(&hwreq->queue)) {
+               dev_err(hwep->ci->dev, "freeing queued request\n");
                return;
        }
 
-       spin_lock_irqsave(mEp->lock, flags);
+       spin_lock_irqsave(hwep->lock, flags);
 
-       if (mReq->ptr)
-               dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
-       kfree(mReq);
+       list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+               dma_pool_free(hwep->td_pool, node->ptr, node->dma);
+               list_del_init(&node->td);
+               node->ptr = NULL;
+               kfree(node);
+       }
+
+       kfree(hwreq);
 
-       spin_unlock_irqrestore(mEp->lock, flags);
+       spin_unlock_irqrestore(hwep->lock, flags);
 }
 
 /**
@@ -1182,16 +1284,16 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
 static int ep_queue(struct usb_ep *ep, struct usb_request *req,
                    gfp_t __maybe_unused gfp_flags)
 {
-       struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+       struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
        int retval = 0;
        unsigned long flags;
 
-       if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
+       if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
                return -EINVAL;
 
-       spin_lock_irqsave(mEp->lock, flags);
+       spin_lock_irqsave(hwep->lock, flags);
        retval = _ep_queue(ep, req, gfp_flags);
-       spin_unlock_irqrestore(mEp->lock, flags);
+       spin_unlock_irqrestore(hwep->lock, flags);
        return retval;
 }
 
@@ -1202,33 +1304,33 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
  */
 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 {
-       struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
-       struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+       struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
+       struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
        unsigned long flags;
 
-       if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
-               mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
-               list_empty(&mEp->qh.queue))
+       if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
+               hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
+               list_empty(&hwep->qh.queue))
                return -EINVAL;
 
-       spin_lock_irqsave(mEp->lock, flags);
+       spin_lock_irqsave(hwep->lock, flags);
 
-       hw_ep_flush(mEp->ci, mEp->num, mEp->dir);
+       hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
        /* pop request */
-       list_del_init(&mReq->queue);
+       list_del_init(&hwreq->queue);
 
-       usb_gadget_unmap_request(&mEp->ci->gadget, req, mEp->dir);
+       usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
 
        req->status = -ECONNRESET;
 
-       if (mReq->req.complete != NULL) {
-               spin_unlock(mEp->lock);
-               mReq->req.complete(&mEp->ep, &mReq->req);
-               spin_lock(mEp->lock);
+       if (hwreq->req.complete != NULL) {
+               spin_unlock(hwep->lock);
+               hwreq->req.complete(&hwep->ep, &hwreq->req);
+               spin_lock(hwep->lock);
        }
 
-       spin_unlock_irqrestore(mEp->lock, flags);
+       spin_unlock_irqrestore(hwep->lock, flags);
        return 0;
 }
 
@@ -1239,37 +1341,40 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
  */
 static int ep_set_halt(struct usb_ep *ep, int value)
 {
-       struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+       struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
        int direction, retval = 0;
        unsigned long flags;
 
-       if (ep == NULL || mEp->ep.desc == NULL)
+       if (ep == NULL || hwep->ep.desc == NULL)
                return -EINVAL;
 
-       spin_lock_irqsave(mEp->lock, flags);
+       if (usb_endpoint_xfer_isoc(hwep->ep.desc))
+               return -EOPNOTSUPP;
+
+       spin_lock_irqsave(hwep->lock, flags);
 
 #ifndef STALL_IN
        /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
-       if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
-           !list_empty(&mEp->qh.queue)) {
-               spin_unlock_irqrestore(mEp->lock, flags);
+       if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
+           !list_empty(&hwep->qh.queue)) {
+               spin_unlock_irqrestore(hwep->lock, flags);
                return -EAGAIN;
        }
 #endif
 
-       direction = mEp->dir;
+       direction = hwep->dir;
        do {
-               retval |= hw_ep_set_halt(mEp->ci, mEp->num, mEp->dir, value);
+               retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
 
                if (!value)
-                       mEp->wedge = 0;
+                       hwep->wedge = 0;
 
-               if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-                       mEp->dir = (mEp->dir == TX) ? RX : TX;
+               if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+                       hwep->dir = (hwep->dir == TX) ? RX : TX;
 
-       } while (mEp->dir != direction);
+       } while (hwep->dir != direction);
 
-       spin_unlock_irqrestore(mEp->lock, flags);
+       spin_unlock_irqrestore(hwep->lock, flags);
        return retval;
 }
 
@@ -1280,15 +1385,15 @@ static int ep_set_halt(struct usb_ep *ep, int value)
  */
 static int ep_set_wedge(struct usb_ep *ep)
 {
-       struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+       struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
        unsigned long flags;
 
-       if (ep == NULL || mEp->ep.desc == NULL)
+       if (ep == NULL || hwep->ep.desc == NULL)
                return -EINVAL;
 
-       spin_lock_irqsave(mEp->lock, flags);
-       mEp->wedge = 1;
-       spin_unlock_irqrestore(mEp->lock, flags);
+       spin_lock_irqsave(hwep->lock, flags);
+       hwep->wedge = 1;
+       spin_unlock_irqrestore(hwep->lock, flags);
 
        return usb_ep_set_halt(ep);
 }
@@ -1300,19 +1405,19 @@ static int ep_set_wedge(struct usb_ep *ep)
  */
 static void ep_fifo_flush(struct usb_ep *ep)
 {
-       struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+       struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
        unsigned long flags;
 
        if (ep == NULL) {
-               dev_err(mEp->ci->dev, "%02X: -EINVAL\n", _usb_addr(mEp));
+               dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
                return;
        }
 
-       spin_lock_irqsave(mEp->lock, flags);
+       spin_lock_irqsave(hwep->lock, flags);
 
-       hw_ep_flush(mEp->ci, mEp->num, mEp->dir);
+       hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
-       spin_unlock_irqrestore(mEp->lock, flags);
+       spin_unlock_irqrestore(hwep->lock, flags);
 }
 
 /**
@@ -1334,13 +1439,13 @@ static const struct usb_ep_ops usb_ep_ops = {
 /******************************************************************************
  * GADGET block
  *****************************************************************************/
-static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
+static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
 {
-       struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
+       struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
        unsigned long flags;
        int gadget_ready = 0;
 
-       if (!(ci->platdata->flags & CI13XXX_PULLUP_ON_VBUS))
+       if (!(ci->platdata->flags & CI_HDRC_PULLUP_ON_VBUS))
                return -EOPNOTSUPP;
 
        spin_lock_irqsave(&ci->lock, flags);
@@ -1358,7 +1463,7 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
                        hw_device_state(ci, 0);
                        if (ci->platdata->notify_event)
                                ci->platdata->notify_event(ci,
-                               CI13XXX_CONTROLLER_STOPPED_EVENT);
+                               CI_HDRC_CONTROLLER_STOPPED_EVENT);
                        _gadget_stop_activity(&ci->gadget);
                        pm_runtime_put_sync(&_gadget->dev);
                }
@@ -1367,9 +1472,9 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
        return 0;
 }
 
-static int ci13xxx_wakeup(struct usb_gadget *_gadget)
+static int ci_udc_wakeup(struct usb_gadget *_gadget)
 {
-       struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
+       struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
        unsigned long flags;
        int ret = 0;
 
@@ -1388,21 +1493,21 @@ out:
        return ret;
 }
 
-static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
 {
-       struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
+       struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
 
        if (ci->transceiver)
-               return usb_phy_set_power(ci->transceiver, mA);
+               return usb_phy_set_power(ci->transceiver, ma);
        return -ENOTSUPP;
 }
 
 /* Change Data+ pullup status
  * this function is used by usb_gadget_connect/disconnect
  */
-static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_on)
+static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
 {
-       struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
+       struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
 
        if (is_on)
                hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
@@ -1412,9 +1517,9 @@ static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_on)
        return 0;
 }
 
-static int ci13xxx_start(struct usb_gadget *gadget,
+static int ci_udc_start(struct usb_gadget *gadget,
                         struct usb_gadget_driver *driver);
-static int ci13xxx_stop(struct usb_gadget *gadget,
+static int ci_udc_stop(struct usb_gadget *gadget,
                        struct usb_gadget_driver *driver);
 /**
  * Device operations part of the API to the USB controller hardware,
@@ -1422,46 +1527,46 @@ static int ci13xxx_stop(struct usb_gadget *gadget,
  * Check  "usb_gadget.h" for details
  */
 static const struct usb_gadget_ops usb_gadget_ops = {
-       .vbus_session   = ci13xxx_vbus_session,
-       .wakeup         = ci13xxx_wakeup,
-       .pullup         = ci13xxx_pullup,
-       .vbus_draw      = ci13xxx_vbus_draw,
-       .udc_start      = ci13xxx_start,
-       .udc_stop       = ci13xxx_stop,
+       .vbus_session   = ci_udc_vbus_session,
+       .wakeup         = ci_udc_wakeup,
+       .pullup         = ci_udc_pullup,
+       .vbus_draw      = ci_udc_vbus_draw,
+       .udc_start      = ci_udc_start,
+       .udc_stop       = ci_udc_stop,
 };
 
-static int init_eps(struct ci13xxx *ci)
+static int init_eps(struct ci_hdrc *ci)
 {
        int retval = 0, i, j;
 
        for (i = 0; i < ci->hw_ep_max/2; i++)
                for (j = RX; j <= TX; j++) {
                        int k = i + j * ci->hw_ep_max/2;
-                       struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[k];
+                       struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
 
-                       scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+                       scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
                                        (j == TX)  ? "in" : "out");
 
-                       mEp->ci          = ci;
-                       mEp->lock         = &ci->lock;
-                       mEp->td_pool      = ci->td_pool;
+                       hwep->ci          = ci;
+                       hwep->lock         = &ci->lock;
+                       hwep->td_pool      = ci->td_pool;
 
-                       mEp->ep.name      = mEp->name;
-                       mEp->ep.ops       = &usb_ep_ops;
+                       hwep->ep.name      = hwep->name;
+                       hwep->ep.ops       = &usb_ep_ops;
                        /*
                         * for ep0: maxP defined in desc, for other
                         * eps, maxP is set by epautoconfig() called
                         * by gadget layer
                         */
-                       mEp->ep.maxpacket = (unsigned short)~0;
+                       hwep->ep.maxpacket = (unsigned short)~0;
 
-                       INIT_LIST_HEAD(&mEp->qh.queue);
-                       mEp->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
-                                                    &mEp->qh.dma);
-                       if (mEp->qh.ptr == NULL)
+                       INIT_LIST_HEAD(&hwep->qh.queue);
+                       hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
+                                                    &hwep->qh.dma);
+                       if (hwep->qh.ptr == NULL)
                                retval = -ENOMEM;
                        else
-                               memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+                               memset(hwep->qh.ptr, 0, sizeof(*hwep->qh.ptr));
 
                        /*
                         * set up shorthands for ep0 out and in endpoints,
@@ -1469,42 +1574,42 @@ static int init_eps(struct ci13xxx *ci)
                         */
                        if (i == 0) {
                                if (j == RX)
-                                       ci->ep0out = mEp;
+                                       ci->ep0out = hwep;
                                else
-                                       ci->ep0in = mEp;
+                                       ci->ep0in = hwep;
 
-                               mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+                               hwep->ep.maxpacket = CTRL_PAYLOAD_MAX;
                                continue;
                        }
 
-                       list_add_tail(&mEp->ep.ep_list, &ci->gadget.ep_list);
+                       list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
                }
 
        return retval;
 }
 
-static void destroy_eps(struct ci13xxx *ci)
+static void destroy_eps(struct ci_hdrc *ci)
 {
        int i;
 
        for (i = 0; i < ci->hw_ep_max; i++) {
-               struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
+               struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
 
-               dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma);
+               dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
        }
 }
 
 /**
- * ci13xxx_start: register a gadget driver
+ * ci_udc_start: register a gadget driver
  * @gadget: our gadget
  * @driver: the driver being registered
  *
  * Interrupts are enabled here.
  */
-static int ci13xxx_start(struct usb_gadget *gadget,
+static int ci_udc_start(struct usb_gadget *gadget,
                         struct usb_gadget_driver *driver)
 {
-       struct ci13xxx *ci = container_of(gadget, struct ci13xxx, gadget);
+       struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
        unsigned long flags;
        int retval = -ENOMEM;
 
@@ -1525,9 +1630,9 @@ static int ci13xxx_start(struct usb_gadget *gadget,
 
        ci->driver = driver;
        pm_runtime_get_sync(&ci->gadget.dev);
-       if (ci->platdata->flags & CI13XXX_PULLUP_ON_VBUS) {
+       if (ci->platdata->flags & CI_HDRC_PULLUP_ON_VBUS) {
                if (ci->vbus_active) {
-                       if (ci->platdata->flags & CI13XXX_REGS_SHARED)
+                       if (ci->platdata->flags & CI_HDRC_REGS_SHARED)
                                hw_device_reset(ci, USBMODE_CM_DC);
                } else {
                        pm_runtime_put_sync(&ci->gadget.dev);
@@ -1545,22 +1650,22 @@ static int ci13xxx_start(struct usb_gadget *gadget,
 }
 
 /**
- * ci13xxx_stop: unregister a gadget driver
+ * ci_udc_stop: unregister a gadget driver
  */
-static int ci13xxx_stop(struct usb_gadget *gadget,
+static int ci_udc_stop(struct usb_gadget *gadget,
                        struct usb_gadget_driver *driver)
 {
-       struct ci13xxx *ci = container_of(gadget, struct ci13xxx, gadget);
+       struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
        unsigned long flags;
 
        spin_lock_irqsave(&ci->lock, flags);
 
-       if (!(ci->platdata->flags & CI13XXX_PULLUP_ON_VBUS) ||
+       if (!(ci->platdata->flags & CI_HDRC_PULLUP_ON_VBUS) ||
                        ci->vbus_active) {
                hw_device_state(ci, 0);
                if (ci->platdata->notify_event)
                        ci->platdata->notify_event(ci,
-                       CI13XXX_CONTROLLER_STOPPED_EVENT);
+                       CI_HDRC_CONTROLLER_STOPPED_EVENT);
                ci->driver = NULL;
                spin_unlock_irqrestore(&ci->lock, flags);
                _gadget_stop_activity(&ci->gadget);
@@ -1582,7 +1687,7 @@ static int ci13xxx_stop(struct usb_gadget *gadget,
  * This function returns IRQ_HANDLED if the IRQ has been handled
  * It locks access to registers
  */
-static irqreturn_t udc_irq(struct ci13xxx *ci)
+static irqreturn_t udc_irq(struct ci_hdrc *ci)
 {
        irqreturn_t retval;
        u32 intr;
@@ -1592,7 +1697,7 @@ static irqreturn_t udc_irq(struct ci13xxx *ci)
 
        spin_lock(&ci->lock);
 
-       if (ci->platdata->flags & CI13XXX_REGS_SHARED) {
+       if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
                if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
                                USBMODE_CM_DC) {
                        spin_unlock(&ci->lock);
@@ -1642,7 +1747,7 @@ static irqreturn_t udc_irq(struct ci13xxx *ci)
  * udc_start: initialize gadget role
  * @ci: chipidea controller
  */
-static int udc_start(struct ci13xxx *ci)
+static int udc_start(struct ci_hdrc *ci)
 {
        struct device *dev = ci->dev;
        int retval = 0;
@@ -1658,15 +1763,15 @@ static int udc_start(struct ci13xxx *ci)
        INIT_LIST_HEAD(&ci->gadget.ep_list);
 
        /* alloc resources */
-       ci->qh_pool = dma_pool_create("ci13xxx_qh", dev,
-                                      sizeof(struct ci13xxx_qh),
-                                      64, CI13XXX_PAGE_SIZE);
+       ci->qh_pool = dma_pool_create("ci_hw_qh", dev,
+                                      sizeof(struct ci_hw_qh),
+                                      64, CI_HDRC_PAGE_SIZE);
        if (ci->qh_pool == NULL)
                return -ENOMEM;
 
-       ci->td_pool = dma_pool_create("ci13xxx_td", dev,
-                                      sizeof(struct ci13xxx_td),
-                                      64, CI13XXX_PAGE_SIZE);
+       ci->td_pool = dma_pool_create("ci_hw_td", dev,
+                                      sizeof(struct ci_hw_td),
+                                      64, CI_HDRC_PAGE_SIZE);
        if (ci->td_pool == NULL) {
                retval = -ENOMEM;
                goto free_qh_pool;
@@ -1684,14 +1789,14 @@ static int udc_start(struct ci13xxx *ci)
                        ci->transceiver = NULL;
        }
 
-       if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
+       if (ci->platdata->flags & CI_HDRC_REQUIRE_TRANSCEIVER) {
                if (ci->transceiver == NULL) {
                        retval = -ENODEV;
                        goto destroy_eps;
                }
        }
 
-       if (!(ci->platdata->flags & CI13XXX_REGS_SHARED)) {
+       if (!(ci->platdata->flags & CI_HDRC_REGS_SHARED)) {
                retval = hw_device_reset(ci, USBMODE_CM_DC);
                if (retval)
                        goto put_transceiver;
@@ -1738,7 +1843,7 @@ free_qh_pool:
  *
  * No interrupts active, the IRQ has been released
  */
-static void udc_stop(struct ci13xxx *ci)
+static void udc_stop(struct ci_hdrc *ci)
 {
        if (ci == NULL)
                return;
@@ -1765,7 +1870,7 @@ static void udc_stop(struct ci13xxx *ci)
  *
  * This function enables the gadget role, if the device is "device capable".
  */
-int ci_hdrc_gadget_init(struct ci13xxx *ci)
+int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 {
        struct ci_role_driver *rdrv;
 
index d12e8b59b11028bf497e561eacaab2cd513a5076..455ac2169226141b703878ecae3ecef5a6a1ace4 100644 (file)
@@ -20,7 +20,7 @@
 #define TX        1  /* similar to USB_DIR_IN  but can be used as an index */
 
 /* DMA layout of transfer descriptors */
-struct ci13xxx_td {
+struct ci_hw_td {
        /* 0 */
        u32 next;
 #define TD_TERMINATE          BIT(0)
@@ -43,24 +43,31 @@ struct ci13xxx_td {
 } __attribute__ ((packed, aligned(4)));
 
 /* DMA layout of queue heads */
-struct ci13xxx_qh {
+struct ci_hw_qh {
        /* 0 */
        u32 cap;
 #define QH_IOS                BIT(15)
 #define QH_MAX_PKT            (0x07FFUL << 16)
 #define QH_ZLT                BIT(29)
 #define QH_MULT               (0x0003UL << 30)
+#define QH_ISO_MULT(x)         ((x >> 11) & 0x03)
        /* 1 */
        u32 curr;
        /* 2 - 8 */
-       struct ci13xxx_td        td;
+       struct ci_hw_td         td;
        /* 9 */
        u32 RESERVED;
        struct usb_ctrlrequest   setup;
 } __attribute__ ((packed, aligned(4)));
 
+struct td_node {
+       struct list_head        td;
+       dma_addr_t              dma;
+       struct ci_hw_td         *ptr;
+};
+
 /**
- * struct ci13xxx_req - usb request representation
+ * struct ci_hw_req - usb request representation
  * @req: request structure for gadget drivers
  * @queue: link to QH list
  * @ptr: transfer descriptor for this request
@@ -68,22 +75,19 @@ struct ci13xxx_qh {
  * @zptr: transfer descriptor for the zero packet
  * @zdma: dma address of the zero packet's transfer descriptor
  */
-struct ci13xxx_req {
+struct ci_hw_req {
        struct usb_request      req;
        struct list_head        queue;
-       struct ci13xxx_td       *ptr;
-       dma_addr_t              dma;
-       struct ci13xxx_td       *zptr;
-       dma_addr_t              zdma;
+       struct list_head        tds;
 };
 
 #ifdef CONFIG_USB_CHIPIDEA_UDC
 
-int ci_hdrc_gadget_init(struct ci13xxx *ci);
+int ci_hdrc_gadget_init(struct ci_hdrc *ci);
 
 #else
 
-static inline int ci_hdrc_gadget_init(struct ci13xxx *ci)
+static inline int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 {
        return -ENXIO;
 }
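
Note: with ci_hw_req now carrying a list of td_node entries instead of a single TD plus an optional zero-length TD, per-request descriptors are reached by walking that list. A minimal sketch, assuming only the fields visible above; the helper name is hypothetical and not part of this patch:

        static void ci_hw_req_dump_tds(struct ci_hw_req *hwreq)
        {
                struct td_node *node;

                /* hwreq->tds is the list head; td_node.td is the link member */
                list_for_each_entry(node, &hwreq->tds, td)
                        pr_debug("td at %pad, next=0x%08x\n",
                                 &node->dma, node->ptr->next);
        }
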
index 714a6bd810ede0de96b28d3523bed2baa703f1de..ac5a4615520011089be9958c76bf702c1a1b38c3 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 
-#include "ci13xxx_imx.h"
+#include "ci_hdrc_imx.h"
 
 #define USB_DEV_MAX 4
 
@@ -175,6 +175,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
        },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids);
 
 static int usbmisc_imx_probe(struct platform_device *pdev)
 {
@@ -243,17 +244,7 @@ static struct platform_driver usbmisc_imx_driver = {
         },
 };
 
-int usbmisc_imx_drv_init(void)
-{
-       return platform_driver_register(&usbmisc_imx_driver);
-}
-subsys_initcall(usbmisc_imx_drv_init);
-
-void usbmisc_imx_drv_exit(void)
-{
-       platform_driver_unregister(&usbmisc_imx_driver);
-}
-module_exit(usbmisc_imx_drv_exit);
+module_platform_driver(usbmisc_imx_driver);
 
 MODULE_ALIAS("platform:usbmisc-imx");
 MODULE_LICENSE("GPL v2");
index 9b1cbcf8fb7fcfbd0f3b576d207a90dd4421c62f..9f49bfe4c6f4a24e112a71ecabee13b6e59aec0a 100644 (file)
@@ -216,38 +216,6 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
        return rc;
 }
 
-static int acm_write_start(struct acm *acm, int wbn)
-{
-       unsigned long flags;
-       struct acm_wb *wb = &acm->wb[wbn];
-       int rc;
-
-       spin_lock_irqsave(&acm->write_lock, flags);
-       if (!acm->dev) {
-               wb->use = 0;
-               spin_unlock_irqrestore(&acm->write_lock, flags);
-               return -ENODEV;
-       }
-
-       dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
-                                                       acm->susp_count);
-       usb_autopm_get_interface_async(acm->control);
-       if (acm->susp_count) {
-               if (!acm->delayed_wb)
-                       acm->delayed_wb = wb;
-               else
-                       usb_autopm_put_interface_async(acm->control);
-               spin_unlock_irqrestore(&acm->write_lock, flags);
-               return 0;       /* A white lie */
-       }
-       usb_mark_last_busy(acm->dev);
-
-       rc = acm_start_wb(acm, wb);
-       spin_unlock_irqrestore(&acm->write_lock, flags);
-
-       return rc;
-
-}
 /*
  * attributes exported through sysfs
  */
@@ -653,13 +621,31 @@ static int acm_tty_write(struct tty_struct *tty,
        }
        wb = &acm->wb[wbn];
 
+       if (!acm->dev) {
+               wb->use = 0;
+               spin_unlock_irqrestore(&acm->write_lock, flags);
+               return -ENODEV;
+       }
+
        count = (count > acm->writesize) ? acm->writesize : count;
        dev_vdbg(&acm->data->dev, "%s - write %d\n", __func__, count);
        memcpy(wb->buf, buf, count);
        wb->len = count;
+
+       usb_autopm_get_interface_async(acm->control);
+       if (acm->susp_count) {
+               if (!acm->delayed_wb)
+                       acm->delayed_wb = wb;
+               else
+                       usb_autopm_put_interface_async(acm->control);
+               spin_unlock_irqrestore(&acm->write_lock, flags);
+               return count;   /* A white lie */
+       }
+       usb_mark_last_busy(acm->dev);
+
+       stat = acm_start_wb(acm, wb);
        spin_unlock_irqrestore(&acm->write_lock, flags);
 
-       stat = acm_write_start(acm, wbn);
        if (stat < 0)
                return stat;
        return count;
index 4c5506ae5e450db8a0977a618eb5e697bd6be1d0..609dbc2f7151d52a08cf2f7b62d229637491ce55 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/usb/tmc.h>
 
 
+#define RIGOL                  1
+#define USBTMC_HEADER_SIZE     12
 #define USBTMC_MINOR_BASE      176
 
 /*
@@ -84,6 +86,8 @@ struct usbtmc_device_data {
        u8 bTag_last_write;     /* needed for abort */
        u8 bTag_last_read;      /* needed for abort */
 
+       u8 rigol_quirk;
+
        /* attributes from the USB TMC spec for this device */
        u8 TermChar;
        bool TermCharEnabled;
@@ -97,6 +101,16 @@ struct usbtmc_device_data {
 };
 #define to_usbtmc_data(d) container_of(d, struct usbtmc_device_data, kref)
 
+struct usbtmc_ID_rigol_quirk {
+       __u16 idVendor;
+       __u16 idProduct;
+};
+
+static const struct usbtmc_ID_rigol_quirk usbtmc_id_quirk[] = {
+       { 0x1ab1, 0x0588 },
+       { 0, 0 }
+};
+
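
Note: the quirk table above is scanned linearly until the zero sentinel, so supporting another instrument with the same behaviour would only mean adding an entry before the terminator. A hedged illustration; the second VID/PID pair is made up:

        static const struct usbtmc_ID_rigol_quirk usbtmc_id_quirk[] = {
                { 0x1ab1, 0x0588 },     /* entry from the patch */
                { 0x1ab1, 0x04b0 },     /* hypothetical additional device */
                { 0, 0 }                /* sentinel: idVendor == 0 ends the scan */
        };
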
 /* Forward declarations */
 static struct usb_driver usbtmc_driver;
 
@@ -361,6 +375,59 @@ exit:
        return rv;
 }
 
+/*
+ * Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint.
+ * @transfer_size: number of bytes to request from the device.
+ *
+ * See the USBTMC specification, Table 4.
+ *
+ * Also updates bTag_last_write.
+ */
+static int send_request_dev_dep_msg_in(struct usbtmc_device_data *data, size_t transfer_size)
+{
+       int retval;
+       u8 buffer[USBTMC_HEADER_SIZE];
+       int actual;
+
+       /* Setup IO buffer for REQUEST_DEV_DEP_MSG_IN message
+        * Refer to class specs for details
+        */
+       buffer[0] = 2;
+       buffer[1] = data->bTag;
+       buffer[2] = ~(data->bTag);
+       buffer[3] = 0; /* Reserved */
+       buffer[4] = (transfer_size) & 255;
+       buffer[5] = ((transfer_size) >> 8) & 255;
+       buffer[6] = ((transfer_size) >> 16) & 255;
+       buffer[7] = ((transfer_size) >> 24) & 255;
+       buffer[8] = data->TermCharEnabled * 2;
+       /* Use term character? */
+       buffer[9] = data->TermChar;
+       buffer[10] = 0; /* Reserved */
+       buffer[11] = 0; /* Reserved */
+
+       /* Send bulk URB */
+       retval = usb_bulk_msg(data->usb_dev,
+                             usb_sndbulkpipe(data->usb_dev,
+                                             data->bulk_out),
+                             buffer, USBTMC_HEADER_SIZE, &actual, USBTMC_TIMEOUT);
+
+       /* Store bTag (in case we need to abort) */
+       data->bTag_last_write = data->bTag;
+
+       /* Increment bTag -- and increment again if zero */
+       data->bTag++;
+       if (!data->bTag)
+               (data->bTag)++;
+
+       if (retval < 0) {
+               dev_err(&data->intf->dev, "usb_bulk_msg in send_request_dev_dep_msg_in() returned %d\n", retval);
+               return retval;
+       }
+
+       return 0;
+}
+
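
Note: the twelve header bytes above follow the Bulk-OUT header layout from the USBTMC spec (MsgID, bTag, ~bTag, the little-endian TransferSize in bytes 4-7, and the TermChar attributes in bytes 8-9). A hedged sketch of the same encoding using the kernel's unaligned helpers; the function name is illustrative and not part of the patch:

        #include <asm/unaligned.h>

        static void usbtmc_fill_request_dev_dep_msg_in(u8 *buffer, u8 btag,
                                                       u32 transfer_size,
                                                       bool term_char_enabled,
                                                       u8 term_char)
        {
                memset(buffer, 0, USBTMC_HEADER_SIZE);
                buffer[0] = 2;                          /* MsgID: REQUEST_DEV_DEP_MSG_IN */
                buffer[1] = btag;                       /* transfer identifier */
                buffer[2] = ~btag;                      /* bTagInverse */
                put_unaligned_le32(transfer_size, &buffer[4]);
                buffer[8] = term_char_enabled ? 2 : 0;  /* bmTransferAttributes */
                buffer[9] = term_char;
        }
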
 static ssize_t usbtmc_read(struct file *filp, char __user *buf,
                           size_t count, loff_t *f_pos)
 {
@@ -388,51 +455,39 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
                goto exit;
        }
 
-       remaining = count;
-       done = 0;
+       if (data->rigol_quirk) {
+               dev_dbg(dev, "usb_bulk_msg_in: count(%zu)\n", count);
 
-       while (remaining > 0) {
-               if (remaining > USBTMC_SIZE_IOBUFFER - 12 - 3)
-                       this_part = USBTMC_SIZE_IOBUFFER - 12 - 3;
-               else
-                       this_part = remaining;
+               retval = send_request_dev_dep_msg_in(data, count);
 
-               /* Setup IO buffer for DEV_DEP_MSG_IN message
-                * Refer to class specs for details
-                */
-               buffer[0] = 2;
-               buffer[1] = data->bTag;
-               buffer[2] = ~(data->bTag);
-               buffer[3] = 0; /* Reserved */
-               buffer[4] = (this_part) & 255;
-               buffer[5] = ((this_part) >> 8) & 255;
-               buffer[6] = ((this_part) >> 16) & 255;
-               buffer[7] = ((this_part) >> 24) & 255;
-               buffer[8] = data->TermCharEnabled * 2;
-               /* Use term character? */
-               buffer[9] = data->TermChar;
-               buffer[10] = 0; /* Reserved */
-               buffer[11] = 0; /* Reserved */
+               if (retval < 0) {
+                       if (data->auto_abort)
+                               usbtmc_ioctl_abort_bulk_out(data);
+                       goto exit;
+               }
+       }
 
-               /* Send bulk URB */
-               retval = usb_bulk_msg(data->usb_dev,
-                                     usb_sndbulkpipe(data->usb_dev,
-                                                     data->bulk_out),
-                                     buffer, 12, &actual, USBTMC_TIMEOUT);
+       /* Loop until we have fetched everything we requested */
+       remaining = count;
+       this_part = remaining;
+       done = 0;
 
-               /* Store bTag (in case we need to abort) */
-               data->bTag_last_write = data->bTag;
+       while (remaining > 0) {
+               if (!(data->rigol_quirk)) {
+                       dev_dbg(dev, "usb_bulk_msg_in: remaining(%zu), count(%zu)\n", remaining, count);
 
-               /* Increment bTag -- and increment again if zero */
-               data->bTag++;
-               if (!data->bTag)
-                       (data->bTag)++;
+                       if (remaining > USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE - 3)
+                               this_part = USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE - 3;
+                       else
+                               this_part = remaining;
 
-               if (retval < 0) {
+                       retval = send_request_dev_dep_msg_in(data, this_part);
+                       if (retval < 0) {
                        dev_err(dev, "usb_bulk_msg returned %d\n", retval);
-                       if (data->auto_abort)
-                               usbtmc_ioctl_abort_bulk_out(data);
-                       goto exit;
+                               if (data->auto_abort)
+                                       usbtmc_ioctl_abort_bulk_out(data);
+                               goto exit;
+                       }
                }
 
                /* Send bulk URB */
@@ -442,51 +497,109 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
                                      buffer, USBTMC_SIZE_IOBUFFER, &actual,
                                      USBTMC_TIMEOUT);
 
+               dev_dbg(dev, "usb_bulk_msg: retval(%u), done(%zu), remaining(%zu), actual(%d)\n", retval, done, remaining, actual);
+
                /* Store bTag (in case we need to abort) */
                data->bTag_last_read = data->bTag;
 
                if (retval < 0) {
-                       dev_err(dev, "Unable to read data, error %d\n", retval);
+                       dev_dbg(dev, "Unable to read data, error %d\n", retval);
                        if (data->auto_abort)
                                usbtmc_ioctl_abort_bulk_in(data);
                        goto exit;
                }
 
-               /* How many characters did the instrument send? */
-               n_characters = buffer[4] +
-                              (buffer[5] << 8) +
-                              (buffer[6] << 16) +
-                              (buffer[7] << 24);
+               /* Parse header in first packet */
+               if ((done == 0) || (!(data->rigol_quirk))) {
+                       /* Sanity checks for the header */
+                       if (actual < USBTMC_HEADER_SIZE) {
+                               dev_err(dev, "Device sent too small first packet: %u < %u\n", actual, USBTMC_HEADER_SIZE);
+                               if (data->auto_abort)
+                                       usbtmc_ioctl_abort_bulk_in(data);
+                               goto exit;
+                       }
 
-               /* Ensure the instrument doesn't lie about it */
-               if(n_characters > actual - 12) {
-                       dev_err(dev, "Device lies about message size: %u > %d\n", n_characters, actual - 12);
-                       n_characters = actual - 12;
-               }
+                       if (buffer[0] != 2) {
+                               dev_err(dev, "Device sent reply with wrong MsgID: %u != 2\n", buffer[0]);
+                               if (data->auto_abort)
+                                       usbtmc_ioctl_abort_bulk_in(data);
+                               goto exit;
+                       }
 
-               /* Ensure the instrument doesn't send more back than requested */
-               if(n_characters > this_part) {
-                       dev_err(dev, "Device returns more than requested: %zu > %zu\n", done + n_characters, done + this_part);
-                       n_characters = this_part;
-               }
+                       if (buffer[1] != data->bTag_last_write) {
+                               dev_err(dev, "Device sent reply with wrong bTag: %u != %u\n", buffer[1], data->bTag_last_write);
+                               if (data->auto_abort)
+                                       usbtmc_ioctl_abort_bulk_in(data);
+                               goto exit;
+                       }
 
-               /* Bound amount of data received by amount of data requested */
-               if (n_characters > this_part)
-                       n_characters = this_part;
+                       /* How many characters did the instrument send? */
+                       n_characters = buffer[4] +
+                                      (buffer[5] << 8) +
+                                      (buffer[6] << 16) +
+                                      (buffer[7] << 24);
 
-               /* Copy buffer to user space */
-               if (copy_to_user(buf + done, &buffer[12], n_characters)) {
-                       /* There must have been an addressing problem */
-                       retval = -EFAULT;
-                       goto exit;
+                       if (n_characters > this_part) {
+                               dev_err(dev, "Device wants to return more data than requested: %u > %zu\n", n_characters, count);
+                               if (data->auto_abort)
+                                       usbtmc_ioctl_abort_bulk_in(data);
+                               goto exit;
+                       }
+
+                       /* Remove the USBTMC header */
+                       actual -= USBTMC_HEADER_SIZE;
+
+                       /* Check if the message is smaller than requested */
+                       if (data->rigol_quirk) {
+                               if (remaining > n_characters)
+                                       remaining = n_characters;
+                               /* Remove padding if it exists */
+                               if (actual > remaining) 
+                                       actual = remaining;
+                       }
+                       else {
+                               if (this_part > n_characters)
+                                       this_part = n_characters;
+                               /* Remove padding if it exists */
+                               if (actual > this_part) 
+                                       actual = this_part;
+                       }
+
+                       dev_dbg(dev, "Bulk-IN header: N_characters(%u), bTransAttr(%u)\n", n_characters, buffer[8]);
+
+                       remaining -= actual;
+
+                       /* Terminate if end-of-message bit received from device */
+                       if ((buffer[8] &  0x01) && (actual >= n_characters))
+                               remaining = 0;
+
+                       dev_dbg(dev, "Bulk-IN header: remaining(%zu), buf(%p), buffer(%p) done(%zu)\n", remaining,buf,buffer,done);
+
+
+                       /* Copy buffer to user space */
+                       if (copy_to_user(buf + done, &buffer[USBTMC_HEADER_SIZE], actual)) {
+                               /* There must have been an addressing problem */
+                               retval = -EFAULT;
+                               goto exit;
+                       }
+                       done += actual;
                }
+               else  {
+                       if (actual > remaining) 
+                               actual = remaining;
+
+                       remaining -= actual;
+
+                       dev_dbg(dev, "Bulk-IN header cont: actual(%u), done(%zu), remaining(%zu), buf(%p), buffer(%p)\n", actual, done, remaining,buf,buffer);
 
-               done += n_characters;
-               /* Terminate if end-of-message bit received from device */
-               if ((buffer[8] &  0x01) && (actual >= n_characters + 12))
-                       remaining = 0;
-               else
-                       remaining -= n_characters;
+                       /* Copy buffer to user space */
+                       if (copy_to_user(buf + done, buffer, actual)) {
+                               /* There must have been an addressing problem */
+                               retval = -EFAULT;
+                               goto exit;
+                       }
+                       done += actual;
+               }
        }
 
        /* Update file position value */
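
Note: on the read side the first Bulk-IN packet starts with a matching DEV_DEP_MSG_IN header, which is why the code above checks MsgID and bTag and only then takes TransferSize from bytes 4-7 before clamping remaining/this_part. A hedged sketch of just that validation step, assuming the same unaligned helper as in the earlier sketch; the helper and its name are illustrative, not part of the patch:

        static int usbtmc_check_dev_dep_msg_in(const u8 *buffer, int actual,
                                               u8 expected_btag, u32 *n_characters)
        {
                if (actual < USBTMC_HEADER_SIZE)
                        return -EIO;            /* short packet: no complete header */
                if (buffer[0] != 2)
                        return -EIO;            /* MsgID must be DEV_DEP_MSG_IN */
                if (buffer[1] != expected_btag)
                        return -EIO;            /* bTag must echo the request */

                *n_characters = get_unaligned_le32(&buffer[4]);
                return 0;
        }
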
@@ -527,8 +640,8 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
        done = 0;
 
        while (remaining > 0) {
-               if (remaining > USBTMC_SIZE_IOBUFFER - 12) {
-                       this_part = USBTMC_SIZE_IOBUFFER - 12;
+               if (remaining > USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE) {
+                       this_part = USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE;
                        buffer[8] = 0;
                } else {
                        this_part = remaining;
@@ -549,13 +662,13 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
                buffer[10] = 0; /* Reserved */
                buffer[11] = 0; /* Reserved */
 
-               if (copy_from_user(&buffer[12], buf + done, this_part)) {
+               if (copy_from_user(&buffer[USBTMC_HEADER_SIZE], buf + done, this_part)) {
                        retval = -EFAULT;
                        goto exit;
                }
 
-               n_bytes = roundup(12 + this_part, 4);
-               memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part));
+               n_bytes = roundup(USBTMC_HEADER_SIZE + this_part, 4);
+               memset(buffer + USBTMC_HEADER_SIZE + this_part, 0, n_bytes - (USBTMC_HEADER_SIZE + this_part));
 
                do {
                        retval = usb_bulk_msg(data->usb_dev,
@@ -1003,6 +1116,20 @@ static int usbtmc_probe(struct usb_interface *intf,
        mutex_init(&data->io_mutex);
        data->zombie = 0;
 
+       /* Determine if it is a Rigol or not */
+       data->rigol_quirk = 0;
+       dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
+               data->usb_dev->descriptor.idVendor,
+               data->usb_dev->descriptor.idProduct);
+       for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
+               if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) &&
+                   (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) {
+                       dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
+                       data->rigol_quirk = 1;
+                       break;
+               }
+       }
+
        /* Initialize USBTMC bTag and other fields */
        data->bTag      = 1;
        data->TermCharEnabled = 0;
index c88c4fb9459dfbc94119e8a0590a924fa5868f0c..05986507b585897c763ac58e067e8a25f7e75374 100644 (file)
 #include <linux/security.h>
 #include <linux/user_namespace.h>
 #include <linux/scatterlist.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/byteorder.h>
 #include <linux/moduleparam.h>
 
 #include "usb.h"
 
 #define USB_MAXBUS                     64
-#define USB_DEVICE_MAX                 USB_MAXBUS * 128
+#define USB_DEVICE_MAX                 (USB_MAXBUS * 128)
 #define USB_SG_SIZE                    16384 /* split-size for large txs */
 
 /* Mutual exclusion for removal, open, and release */
@@ -1804,7 +1804,8 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
 
        /* alloc buffer */
        if ((size = _IOC_SIZE(ctl->ioctl_code)) > 0) {
-               if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
+               buf = kmalloc(size, GFP_KERNEL);
+               if (buf == NULL)
                        return -ENOMEM;
                if ((_IOC_DIR(ctl->ioctl_code) & _IOC_WRITE)) {
                        if (copy_from_user(buf, ctl->data, size)) {
index e5387a47ef6fea27f978782bbed9375f4ff22d1d..6a4c40766f0f131d0d133c3e263f5c3eb4aa9929 100644 (file)
@@ -94,7 +94,7 @@ static int init_usb_class(void)
        kref_init(&usb_class->kref);
        usb_class->class = class_create(THIS_MODULE, "usbmisc");
        if (IS_ERR(usb_class->class)) {
-               result = IS_ERR(usb_class->class);
+               result = PTR_ERR(usb_class->class);
                printk(KERN_ERR "class_create failed for usb devices\n");
                kfree(usb_class);
                usb_class = NULL;
index d53547d2e4c744c92b43ce55883351aa241f8fb4..014dc996b4f6e0da44a8da8886ef5008307c6541 100644 (file)
@@ -149,6 +149,27 @@ static const u8 usb3_rh_dev_descriptor[18] = {
        0x01        /*  __u8  bNumConfigurations; */
 };
 
+/* usb 2.5 (wireless USB 1.0) root hub device descriptor */
+static const u8 usb25_rh_dev_descriptor[18] = {
+       0x12,       /*  __u8  bLength; */
+       0x01,       /*  __u8  bDescriptorType; Device */
+       0x50, 0x02, /*  __le16 bcdUSB; v2.5 */
+
+       0x09,       /*  __u8  bDeviceClass; HUB_CLASSCODE */
+       0x00,       /*  __u8  bDeviceSubClass; */
+       0x00,       /*  __u8  bDeviceProtocol; [ usb 2.0 no TT ] */
+       0xFF,       /*  __u8  bMaxPacketSize0; always 0xFF (WUSB Spec 7.4.1). */
+
+       0x6b, 0x1d, /*  __le16 idVendor; Linux Foundation 0x1d6b */
+       0x02, 0x00, /*  __le16 idProduct; device 0x0002 */
+       KERNEL_VER, KERNEL_REL, /*  __le16 bcdDevice */
+
+       0x03,       /*  __u8  iManufacturer; */
+       0x02,       /*  __u8  iProduct; */
+       0x01,       /*  __u8  iSerialNumber; */
+       0x01        /*  __u8  bNumConfigurations; */
+};
+
 /* usb 2.0 root hub device descriptor */
 static const u8 usb2_rh_dev_descriptor [18] = {
        0x12,       /*  __u8  bLength; */
@@ -527,6 +548,9 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
                        case HCD_USB3:
                                bufp = usb3_rh_dev_descriptor;
                                break;
+                       case HCD_USB25:
+                               bufp = usb25_rh_dev_descriptor;
+                               break;
                        case HCD_USB2:
                                bufp = usb2_rh_dev_descriptor;
                                break;
@@ -546,6 +570,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
                                bufp = ss_rh_config_descriptor;
                                len = sizeof ss_rh_config_descriptor;
                                break;
+                       case HCD_USB25:
                        case HCD_USB2:
                                bufp = hs_rh_config_descriptor;
                                len = sizeof hs_rh_config_descriptor;
@@ -2511,6 +2536,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
        case HCD_USB2:
                rhdev->speed = USB_SPEED_HIGH;
                break;
+       case HCD_USB25:
+               rhdev->speed = USB_SPEED_WIRELESS;
+               break;
        case HCD_USB3:
                rhdev->speed = USB_SPEED_SUPER;
                break;
index feef9351463d99845a379ee3cdd1cb32b6772a4c..4191db32f12c2ccf4901287c6ccb1a357c9b552a 100644 (file)
@@ -718,18 +718,18 @@ static void hub_tt_work(struct work_struct *work)
 
 /**
  * usb_hub_set_port_power - control hub port's power state
- * @hdev: target hub
+ * @hdev: USB device belonging to the usb hub
+ * @hub: target hub
  * @port1: port index
  * @set: expected status
  *
  * call this function to control the port's power via setting or
  * clearing the port's PORT_POWER feature.
  */
-int usb_hub_set_port_power(struct usb_device *hdev, int port1,
-               bool set)
+int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
+                          int port1, bool set)
 {
        int ret;
-       struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
        struct usb_port *port_dev = hub->ports[port1 - 1];
 
        if (set)
@@ -1769,15 +1769,17 @@ hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
 static int find_port_owner(struct usb_device *hdev, unsigned port1,
                struct dev_state ***ppowner)
 {
+       struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+
        if (hdev->state == USB_STATE_NOTATTACHED)
                return -ENODEV;
        if (port1 == 0 || port1 > hdev->maxchild)
                return -EINVAL;
 
-       /* This assumes that devices not managed by the hub driver
+       /* Devices not managed by the hub driver
         * will always have maxchild equal to 0.
         */
-       *ppowner = &(usb_hub_to_struct_hub(hdev)->ports[port1 - 1]->port_owner);
+       *ppowner = &(hub->ports[port1 - 1]->port_owner);
        return 0;
 }
 
@@ -5323,7 +5325,8 @@ void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1,
 {
        struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
 
-       hub->ports[port1 - 1]->connect_type = type;
+       if (hub)
+               hub->ports[port1 - 1]->connect_type = type;
 }
 
 /**
@@ -5339,6 +5342,9 @@ usb_get_hub_port_connect_type(struct usb_device *hdev, int port1)
 {
        struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
 
+       if (!hub)
+               return USB_PORT_CONNECT_TYPE_UNKNOWN;
+
        return hub->ports[port1 - 1]->connect_type;
 }
 
@@ -5397,6 +5403,9 @@ acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev,
 {
        struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
 
+       if (!hub)
+               return NULL;
+
        return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
 }
 #endif
index 80ab9ee070171a795a93758a271f96767156e68f..6508e02b3dac91718c31dda7e90b6984fed0c898 100644 (file)
@@ -100,7 +100,7 @@ extern int usb_hub_create_port_device(struct usb_hub *hub,
                int port1);
 extern void usb_hub_remove_port_device(struct usb_hub *hub,
                int port1);
-extern int usb_hub_set_port_power(struct usb_device *hdev,
+extern int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
                int port1, bool set);
 extern struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev);
 extern int hub_port_debounce(struct usb_hub *hub, int port1,
index 444d30e3a78b7f2fb91fcda9156d82401414a67d..e7ee1e451660a4c9a29cee88dc9c97f3aec534c0 100644 (file)
@@ -252,7 +252,7 @@ static void sg_clean(struct usb_sg_request *io)
 {
        if (io->urbs) {
                while (io->entries--)
-                       usb_free_urb(io->urbs [io->entries]);
+                       usb_free_urb(io->urbs[io->entries]);
                kfree(io->urbs);
                io->urbs = NULL;
        }
@@ -300,10 +300,10 @@ static void sg_complete(struct urb *urb)
                 */
                spin_unlock(&io->lock);
                for (i = 0, found = 0; i < io->entries; i++) {
-                       if (!io->urbs [i] || !io->urbs [i]->dev)
+                       if (!io->urbs[i] || !io->urbs[i]->dev)
                                continue;
                        if (found) {
-                               retval = usb_unlink_urb(io->urbs [i]);
+                               retval = usb_unlink_urb(io->urbs[i]);
                                if (retval != -EINPROGRESS &&
                                    retval != -ENODEV &&
                                    retval != -EBUSY &&
@@ -311,7 +311,7 @@ static void sg_complete(struct urb *urb)
                                        dev_err(&io->dev->dev,
                                                "%s, unlink --> %d\n",
                                                __func__, retval);
-                       } else if (urb == io->urbs [i])
+                       } else if (urb == io->urbs[i])
                                found = 1;
                }
                spin_lock(&io->lock);
@@ -379,7 +379,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
        }
 
        /* initialize all the urbs we'll use */
-       io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+       io->urbs = kmalloc(io->entries * sizeof(*io->urbs), mem_flags);
        if (!io->urbs)
                goto nomem;
 
@@ -511,7 +511,7 @@ void usb_sg_wait(struct usb_sg_request *io)
                int retval;
 
                io->urbs[i]->dev = io->dev;
-               retval = usb_submit_urb(io->urbs [i], GFP_ATOMIC);
+               retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC);
 
                /* after we submit, let completions or cancelations fire;
                 * we handshake using io->status.
@@ -586,9 +586,9 @@ void usb_sg_cancel(struct usb_sg_request *io)
                for (i = 0; i < io->entries; i++) {
                        int retval;
 
-                       if (!io->urbs [i]->dev)
+                       if (!io->urbs[i]->dev)
                                continue;
-                       retval = usb_unlink_urb(io->urbs [i]);
+                       retval = usb_unlink_urb(io->urbs[i]);
                        if (retval != -EINPROGRESS
                                        && retval != -ENODEV
                                        && retval != -EBUSY
index b8bad294eeb8d168d58a30e92875cb55c1dbbe81..5fd3fee58f8b9ebc746e2e9372da5be94ee478b8 100644 (file)
@@ -86,7 +86,7 @@ static int usb_port_runtime_resume(struct device *dev)
        usb_autopm_get_interface(intf);
        set_bit(port1, hub->busy_bits);
 
-       retval = usb_hub_set_port_power(hdev, port1, true);
+       retval = usb_hub_set_port_power(hdev, hub, port1, true);
        if (port_dev->child && !retval) {
                /*
                 * Wait for usb hub port to be reconnected in order to make
@@ -128,7 +128,7 @@ static int usb_port_runtime_suspend(struct device *dev)
 
        usb_autopm_get_interface(intf);
        set_bit(port1, hub->busy_bits);
-       retval = usb_hub_set_port_power(hdev, port1, false);
+       retval = usb_hub_set_port_power(hdev, hub, port1, false);
        usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
        usb_clear_port_feature(hdev, port1,     USB_PORT_FEAT_C_ENABLE);
        clear_bit(port1, hub->busy_bits);
index aa38db44818a99ab9331c11d2c670669d014498f..d9284b998bd7238d13918c27094150ff80fd0c6c 100644 (file)
@@ -497,8 +497,62 @@ set_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(usb2_hardware_lpm, S_IRUGO | S_IWUSR, show_usb2_hardware_lpm,
                        set_usb2_hardware_lpm);
 
+static ssize_t
+show_usb2_lpm_l1_timeout(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct usb_device *udev = to_usb_device(dev);
+       return sprintf(buf, "%d\n", udev->l1_params.timeout);
+}
+
+static ssize_t
+set_usb2_lpm_l1_timeout(struct device *dev, struct device_attribute *attr,
+                       const char *buf, size_t count)
+{
+       struct usb_device *udev = to_usb_device(dev);
+       u16 timeout;
+
+       if (kstrtou16(buf, 0, &timeout))
+               return -EINVAL;
+
+       udev->l1_params.timeout = timeout;
+
+       return count;
+}
+
+static DEVICE_ATTR(usb2_lpm_l1_timeout, S_IRUGO | S_IWUSR,
+                  show_usb2_lpm_l1_timeout, set_usb2_lpm_l1_timeout);
+
+static ssize_t
+show_usb2_lpm_besl(struct device *dev, struct device_attribute *attr,
+                  char *buf)
+{
+       struct usb_device *udev = to_usb_device(dev);
+       return sprintf(buf, "%d\n", udev->l1_params.besl);
+}
+
+static ssize_t
+set_usb2_lpm_besl(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct usb_device *udev = to_usb_device(dev);
+       u8 besl;
+
+       if (kstrtou8(buf, 0, &besl) || besl > 15)
+               return -EINVAL;
+
+       udev->l1_params.besl = besl;
+
+       return count;
+}
+
+static DEVICE_ATTR(usb2_lpm_besl, S_IRUGO | S_IWUSR,
+                  show_usb2_lpm_besl, set_usb2_lpm_besl);
+
 static struct attribute *usb2_hardware_lpm_attr[] = {
        &dev_attr_usb2_hardware_lpm.attr,
+       &dev_attr_usb2_lpm_l1_timeout.attr,
+       &dev_attr_usb2_lpm_besl.attr,
        NULL,
 };
 static struct attribute_group usb2_hardware_lpm_attr_group = {
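
Note: both new attributes are added to the existing usb2_hardware_lpm attribute group, so they should appear next to usb2_hardware_lpm in the device's sysfs directory (under power/, assuming the group keeps its existing name). A hedged userspace sketch; the bus address 1-1 and the BESL value are purely illustrative:

        #include <stdio.h>

        int main(void)
        {
                /* besl accepts 0..15; kstrtou8 with base 0 also takes 0x-prefixed input */
                FILE *f = fopen("/sys/bus/usb/devices/1-1/power/usb2_lpm_besl", "w");

                if (!f)
                        return 1;
                fprintf(f, "4\n");
                return fclose(f) ? 1 : 0;
        }
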
index b10da720f2b4d612385e778578a86b2562eb11ed..7dad603dde4308f77ef69a049427664e06b4be07 100644 (file)
@@ -209,6 +209,39 @@ struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
 }
 EXPORT_SYMBOL_GPL(usb_find_interface);
 
+struct each_dev_arg {
+       void *data;
+       int (*fn)(struct usb_device *, void *);
+};
+
+static int __each_dev(struct device *dev, void *data)
+{
+       struct each_dev_arg *arg = (struct each_dev_arg *)data;
+
+       /* There are struct usb_interface devices on the same bus; filter them out */
+       if (!is_usb_device(dev))
+               return 0;
+
+       return arg->fn(container_of(dev, struct usb_device, dev), arg->data);
+}
+
+/**
+ * usb_for_each_dev - iterate over all USB devices in the system
+ * @data: data pointer that will be handed to the callback function
+ * @fn: callback function to be called for each USB device
+ *
+ * Iterate over all USB devices and call @fn for each, passing it @data. If it
+ * returns anything other than 0, we break the iteration prematurely and return
+ * that value.
+ */
+int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *))
+{
+       struct each_dev_arg arg = {data, fn};
+
+       return bus_for_each_dev(&usb_bus_type, NULL, &arg, __each_dev);
+}
+EXPORT_SYMBOL_GPL(usb_for_each_dev);
+
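
Note: a minimal sketch of a caller for the new iterator; the callback and the counting purpose are illustrative only. Returning non-zero from the callback stops the walk and propagates that value back to the caller.

        static int usbdev_count_one(struct usb_device *udev, void *data)
        {
                unsigned int *count = data;

                (*count)++;
                return 0;               /* keep iterating */
        }

        static unsigned int usbdev_count_all(void)
        {
                unsigned int count = 0;

                usb_for_each_dev(&count, usbdev_count_one);
                return count;
        }
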
 /**
  * usb_release_dev - free a usb device structure when all users of it are finished.
  * @dev: device that's been disconnected
index 34638b92500d0606e1718a3e6ae43a7d3d2d0b46..077f110bd746073931e75fc6c9321cd0b1003aff 100644 (file)
 #define USBOTGSS_REVISION                      0x0000
 #define USBOTGSS_SYSCONFIG                     0x0010
 #define USBOTGSS_IRQ_EOI                       0x0020
+#define USBOTGSS_EOI_OFFSET                    0x0008
 #define USBOTGSS_IRQSTATUS_RAW_0               0x0024
 #define USBOTGSS_IRQSTATUS_0                   0x0028
 #define USBOTGSS_IRQENABLE_SET_0               0x002c
 #define USBOTGSS_IRQENABLE_CLR_0               0x0030
-#define USBOTGSS_IRQSTATUS_RAW_1               0x0034
-#define USBOTGSS_IRQSTATUS_1                   0x0038
-#define USBOTGSS_IRQENABLE_SET_1               0x003c
-#define USBOTGSS_IRQENABLE_CLR_1               0x0040
+#define USBOTGSS_IRQ0_OFFSET                   0x0004
+#define USBOTGSS_IRQSTATUS_RAW_1               0x0030
+#define USBOTGSS_IRQSTATUS_1                   0x0034
+#define USBOTGSS_IRQENABLE_SET_1               0x0038
+#define USBOTGSS_IRQENABLE_CLR_1               0x003c
+#define USBOTGSS_IRQSTATUS_RAW_2               0x0040
+#define USBOTGSS_IRQSTATUS_2                   0x0044
+#define USBOTGSS_IRQENABLE_SET_2               0x0048
+#define USBOTGSS_IRQENABLE_CLR_2               0x004c
+#define USBOTGSS_IRQSTATUS_RAW_3               0x0050
+#define USBOTGSS_IRQSTATUS_3                   0x0054
+#define USBOTGSS_IRQENABLE_SET_3               0x0058
+#define USBOTGSS_IRQENABLE_CLR_3               0x005c
+#define USBOTGSS_IRQSTATUS_EOI_MISC            0x0030
+#define USBOTGSS_IRQSTATUS_RAW_MISC            0x0034
+#define USBOTGSS_IRQSTATUS_MISC                        0x0038
+#define USBOTGSS_IRQENABLE_SET_MISC            0x003c
+#define USBOTGSS_IRQENABLE_CLR_MISC            0x0040
+#define USBOTGSS_IRQMISC_OFFSET                        0x03fc
 #define USBOTGSS_UTMI_OTG_CTRL                 0x0080
 #define USBOTGSS_UTMI_OTG_STATUS               0x0084
+#define USBOTGSS_UTMI_OTG_OFFSET               0x0480
+#define USBOTGSS_TXFIFO_DEPTH                  0x0508
+#define USBOTGSS_RXFIFO_DEPTH                  0x050c
 #define USBOTGSS_MMRAM_OFFSET                  0x0100
 #define USBOTGSS_FLADJ                         0x0104
 #define USBOTGSS_DEBUG_CFG                     0x0108
 #define USBOTGSS_DEBUG_DATA                    0x010c
+#define USBOTGSS_DEV_EBC_EN                    0x0110
+#define USBOTGSS_DEBUG_OFFSET                  0x0600
 
+/* REVISION REGISTER */
+#define USBOTGSS_REVISION_XMAJOR(reg)          ((reg >> 8) & 0x7)
+#define USBOTGSS_REVISION_XMAJOR1              1
+#define USBOTGSS_REVISION_XMAJOR2              2
 /* SYSCONFIG REGISTER */
 #define USBOTGSS_SYSCONFIG_DMADISABLE          (1 << 16)
 
 /* IRQS0 BITS */
 #define USBOTGSS_IRQO_COREIRQ_ST               (1 << 0)
 
-/* IRQ1 BITS */
-#define USBOTGSS_IRQ1_DMADISABLECLR            (1 << 17)
-#define USBOTGSS_IRQ1_OEVT                     (1 << 16)
-#define USBOTGSS_IRQ1_DRVVBUS_RISE             (1 << 13)
-#define USBOTGSS_IRQ1_CHRGVBUS_RISE            (1 << 12)
-#define USBOTGSS_IRQ1_DISCHRGVBUS_RISE         (1 << 11)
-#define USBOTGSS_IRQ1_IDPULLUP_RISE            (1 << 8)
-#define USBOTGSS_IRQ1_DRVVBUS_FALL             (1 << 5)
-#define USBOTGSS_IRQ1_CHRGVBUS_FALL            (1 << 4)
-#define USBOTGSS_IRQ1_DISCHRGVBUS_FALL         (1 << 3)
-#define USBOTGSS_IRQ1_IDPULLUP_FALL            (1 << 0)
+/* IRQMISC BITS */
+#define USBOTGSS_IRQMISC_DMADISABLECLR         (1 << 17)
+#define USBOTGSS_IRQMISC_OEVT                  (1 << 16)
+#define USBOTGSS_IRQMISC_DRVVBUS_RISE          (1 << 13)
+#define USBOTGSS_IRQMISC_CHRGVBUS_RISE         (1 << 12)
+#define USBOTGSS_IRQMISC_DISCHRGVBUS_RISE      (1 << 11)
+#define USBOTGSS_IRQMISC_IDPULLUP_RISE         (1 << 8)
+#define USBOTGSS_IRQMISC_DRVVBUS_FALL          (1 << 5)
+#define USBOTGSS_IRQMISC_CHRGVBUS_FALL         (1 << 4)
+#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL              (1 << 3)
+#define USBOTGSS_IRQMISC_IDPULLUP_FALL         (1 << 0)
 
 /* UTMI_OTG_CTRL REGISTER */
 #define USBOTGSS_UTMI_OTG_CTRL_DRVVBUS         (1 << 5)
@@ -122,6 +147,12 @@ struct dwc3_omap {
        void __iomem            *base;
 
        u32                     utmi_otg_status;
+       u32                     utmi_otg_offset;
+       u32                     irqmisc_offset;
+       u32                     irq_eoi_offset;
+       u32                     debug_offset;
+       u32                     irq0_offset;
+       u32                     revision;
 
        u32                     dma_status:1;
 };
@@ -138,6 +169,58 @@ static inline void dwc3_omap_writel(void __iomem *base, u32 offset, u32 value)
        writel(value, base + offset);
 }
 
+static u32 dwc3_omap_read_utmi_status(struct dwc3_omap *omap)
+{
+       return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS +
+                                                       omap->utmi_otg_offset);
+}
+
+static void dwc3_omap_write_utmi_status(struct dwc3_omap *omap, u32 value)
+{
+       dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS +
+                                       omap->utmi_otg_offset, value);
+
+}
+
+static u32 dwc3_omap_read_irq0_status(struct dwc3_omap *omap)
+{
+       return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_0 -
+                                               omap->irq0_offset);
+}
+
+static void dwc3_omap_write_irq0_status(struct dwc3_omap *omap, u32 value)
+{
+       dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_0 -
+                                               omap->irq0_offset, value);
+
+}
+
+static u32 dwc3_omap_read_irqmisc_status(struct dwc3_omap *omap)
+{
+       return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_MISC +
+                                               omap->irqmisc_offset);
+}
+
+static void dwc3_omap_write_irqmisc_status(struct dwc3_omap *omap, u32 value)
+{
+       dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_MISC +
+                                       omap->irqmisc_offset, value);
+
+}
+
+static void dwc3_omap_write_irqmisc_set(struct dwc3_omap *omap, u32 value)
+{
+       dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_MISC +
+                                               omap->irqmisc_offset, value);
+
+}
+
+static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
+{
+       dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_0 -
+                                               omap->irq0_offset, value);
+}
+
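
Note: the helpers above fold a per-instance offset into each register access (added for the UTMI and MISC banks, subtracted for the IRQ0 bank), so newer register maps can be supported without touching the callers. How the offsets get populated is not shown in this hunk; a hedged sketch under the assumption that older blocks simply keep every offset at zero (the helper name and the new_regmap flag are hypothetical):

        static void dwc3_omap_set_offsets(struct dwc3_omap *omap, bool new_regmap)
        {
                if (!new_regmap)
                        return;         /* legacy map: all offsets stay zero */

                omap->irq_eoi_offset  = USBOTGSS_EOI_OFFSET;
                omap->irq0_offset     = USBOTGSS_IRQ0_OFFSET;
                omap->irqmisc_offset  = USBOTGSS_IRQMISC_OFFSET;
                omap->utmi_otg_offset = USBOTGSS_UTMI_OTG_OFFSET;
                omap->debug_offset    = USBOTGSS_DEBUG_OFFSET;
        }
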
 int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status)
 {
        u32                     val;
@@ -150,38 +233,38 @@ int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status)
        case OMAP_DWC3_ID_GROUND:
                dev_dbg(omap->dev, "ID GND\n");
 
-               val = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
+               val = dwc3_omap_read_utmi_status(omap);
                val &= ~(USBOTGSS_UTMI_OTG_STATUS_IDDIG
                                | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
                                | USBOTGSS_UTMI_OTG_STATUS_SESSEND);
                val |= USBOTGSS_UTMI_OTG_STATUS_SESSVALID
                                | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
-               dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, val);
+               dwc3_omap_write_utmi_status(omap, val);
                break;
 
        case OMAP_DWC3_VBUS_VALID:
                dev_dbg(omap->dev, "VBUS Connect\n");
 
-               val = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
+               val = dwc3_omap_read_utmi_status(omap);
                val &= ~USBOTGSS_UTMI_OTG_STATUS_SESSEND;
                val |= USBOTGSS_UTMI_OTG_STATUS_IDDIG
                                | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
                                | USBOTGSS_UTMI_OTG_STATUS_SESSVALID
                                | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
-               dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, val);
+               dwc3_omap_write_utmi_status(omap, val);
                break;
 
        case OMAP_DWC3_ID_FLOAT:
        case OMAP_DWC3_VBUS_OFF:
                dev_dbg(omap->dev, "VBUS Disconnect\n");
 
-               val = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
+               val = dwc3_omap_read_utmi_status(omap);
                val &= ~(USBOTGSS_UTMI_OTG_STATUS_SESSVALID
                                | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
                                | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT);
                val |= USBOTGSS_UTMI_OTG_STATUS_SESSEND
                                | USBOTGSS_UTMI_OTG_STATUS_IDDIG;
-               dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, val);
+               dwc3_omap_write_utmi_status(omap, val);
                break;
 
        default:
@@ -199,44 +282,45 @@ static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
 
        spin_lock(&omap->lock);
 
-       reg = dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_1);
+       reg = dwc3_omap_read_irqmisc_status(omap);
 
-       if (reg & USBOTGSS_IRQ1_DMADISABLECLR) {
+       if (reg & USBOTGSS_IRQMISC_DMADISABLECLR) {
                dev_dbg(omap->dev, "DMA Disable was Cleared\n");
                omap->dma_status = false;
        }
 
-       if (reg & USBOTGSS_IRQ1_OEVT)
+       if (reg & USBOTGSS_IRQMISC_OEVT)
                dev_dbg(omap->dev, "OTG Event\n");
 
-       if (reg & USBOTGSS_IRQ1_DRVVBUS_RISE)
+       if (reg & USBOTGSS_IRQMISC_DRVVBUS_RISE)
                dev_dbg(omap->dev, "DRVVBUS Rise\n");
 
-       if (reg & USBOTGSS_IRQ1_CHRGVBUS_RISE)
+       if (reg & USBOTGSS_IRQMISC_CHRGVBUS_RISE)
                dev_dbg(omap->dev, "CHRGVBUS Rise\n");
 
-       if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_RISE)
+       if (reg & USBOTGSS_IRQMISC_DISCHRGVBUS_RISE)
                dev_dbg(omap->dev, "DISCHRGVBUS Rise\n");
 
-       if (reg & USBOTGSS_IRQ1_IDPULLUP_RISE)
+       if (reg & USBOTGSS_IRQMISC_IDPULLUP_RISE)
                dev_dbg(omap->dev, "IDPULLUP Rise\n");
 
-       if (reg & USBOTGSS_IRQ1_DRVVBUS_FALL)
+       if (reg & USBOTGSS_IRQMISC_DRVVBUS_FALL)
                dev_dbg(omap->dev, "DRVVBUS Fall\n");
 
-       if (reg & USBOTGSS_IRQ1_CHRGVBUS_FALL)
+       if (reg & USBOTGSS_IRQMISC_CHRGVBUS_FALL)
                dev_dbg(omap->dev, "CHRGVBUS Fall\n");
 
-       if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_FALL)
+       if (reg & USBOTGSS_IRQMISC_DISCHRGVBUS_FALL)
                dev_dbg(omap->dev, "DISCHRGVBUS Fall\n");
 
-       if (reg & USBOTGSS_IRQ1_IDPULLUP_FALL)
+       if (reg & USBOTGSS_IRQMISC_IDPULLUP_FALL)
                dev_dbg(omap->dev, "IDPULLUP Fall\n");
 
-       dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_1, reg);
+       dwc3_omap_write_irqmisc_status(omap, reg);
+
+       reg = dwc3_omap_read_irq0_status(omap);
 
-       reg = dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_0);
-       dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_0, reg);
+       dwc3_omap_write_irq0_status(omap, reg);
 
        spin_unlock(&omap->lock);
 
@@ -258,26 +342,26 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
 
        /* enable all IRQs */
        reg = USBOTGSS_IRQO_COREIRQ_ST;
-       dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_0, reg);
-
-       reg = (USBOTGSS_IRQ1_OEVT |
-                       USBOTGSS_IRQ1_DRVVBUS_RISE |
-                       USBOTGSS_IRQ1_CHRGVBUS_RISE |
-                       USBOTGSS_IRQ1_DISCHRGVBUS_RISE |
-                       USBOTGSS_IRQ1_IDPULLUP_RISE |
-                       USBOTGSS_IRQ1_DRVVBUS_FALL |
-                       USBOTGSS_IRQ1_CHRGVBUS_FALL |
-                       USBOTGSS_IRQ1_DISCHRGVBUS_FALL |
-                       USBOTGSS_IRQ1_IDPULLUP_FALL);
-
-       dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_1, reg);
+       dwc3_omap_write_irq0_set(omap, reg);
+
+       reg = (USBOTGSS_IRQMISC_OEVT |
+                       USBOTGSS_IRQMISC_DRVVBUS_RISE |
+                       USBOTGSS_IRQMISC_CHRGVBUS_RISE |
+                       USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
+                       USBOTGSS_IRQMISC_IDPULLUP_RISE |
+                       USBOTGSS_IRQMISC_DRVVBUS_FALL |
+                       USBOTGSS_IRQMISC_CHRGVBUS_FALL |
+                       USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
+                       USBOTGSS_IRQMISC_IDPULLUP_FALL);
+
+       dwc3_omap_write_irqmisc_set(omap, reg);
 }
 
 static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
 {
        /* disable all IRQs */
-       dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_1, 0x00);
-       dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_0, 0x00);
+       dwc3_omap_write_irqmisc_set(omap, 0x00);
+       dwc3_omap_write_irq0_set(omap, 0x00);
 }
 
 static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
@@ -294,6 +378,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
        int                     irq;
 
        int                     utmi_mode = 0;
+       int                     x_major;
 
        u32                     reg;
 
@@ -347,10 +432,46 @@ static int dwc3_omap_probe(struct platform_device *pdev)
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "get_sync failed with err %d\n", ret);
-               return ret;
+               goto err0;
        }
 
-       reg = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
+       reg = dwc3_omap_readl(omap->base, USBOTGSS_REVISION);
+       omap->revision = reg;
+       x_major = USBOTGSS_REVISION_XMAJOR(reg);
+
+       /* Differentiate between OMAP5 and AM437x */
+       switch (x_major) {
+       case USBOTGSS_REVISION_XMAJOR1:
+       case USBOTGSS_REVISION_XMAJOR2:
+               omap->irq_eoi_offset = 0;
+               omap->irq0_offset = 0;
+               omap->irqmisc_offset = 0;
+               omap->utmi_otg_offset = 0;
+               omap->debug_offset = 0;
+               break;
+       default:
+               /* Default to the latest revision */
+               omap->irq_eoi_offset = USBOTGSS_EOI_OFFSET;
+               omap->irq0_offset = USBOTGSS_IRQ0_OFFSET;
+               omap->irqmisc_offset = USBOTGSS_IRQMISC_OFFSET;
+               omap->utmi_otg_offset = USBOTGSS_UTMI_OTG_OFFSET;
+               omap->debug_offset = USBOTGSS_DEBUG_OFFSET;
+               break;
+       }
+
+       /*
+        * For OMAP5 (ES2.0) and AM437x, x_major is 2 even though the wrapper
+        * registers differ, so use the dt compatible string to identify
+        * AM437x (aegis).
+        */
+
+       if (of_device_is_compatible(node, "ti,am437x-dwc3")) {
+               omap->irq_eoi_offset = USBOTGSS_EOI_OFFSET;
+               omap->irq0_offset = USBOTGSS_IRQ0_OFFSET;
+               omap->irqmisc_offset = USBOTGSS_IRQMISC_OFFSET;
+               omap->utmi_otg_offset = USBOTGSS_UTMI_OTG_OFFSET;
+               omap->debug_offset = USBOTGSS_DEBUG_OFFSET;
+       }
+
+       reg = dwc3_omap_read_utmi_status(omap);
 
        of_property_read_u32(node, "utmi-mode", &utmi_mode);
 
@@ -365,7 +486,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
                dev_dbg(dev, "UNKNOWN utmi mode %d\n", utmi_mode);
        }
 
-       dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, reg);
+       dwc3_omap_write_utmi_status(omap, reg);
 
        /* check the DMA Status */
        reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
@@ -376,7 +497,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(dev, "failed to request IRQ #%d --> %d\n",
                                omap->irq, ret);
-               return ret;
+               goto err1;
        }
 
        dwc3_omap_enable_irqs(omap);
@@ -384,10 +505,21 @@ static int dwc3_omap_probe(struct platform_device *pdev)
        ret = of_platform_populate(node, NULL, NULL, dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to create dwc3 core\n");
-               return ret;
+               goto err2;
        }
 
        return 0;
+
+err2:
+       dwc3_omap_disable_irqs(omap);
+
+err1:
+       pm_runtime_put_sync(dev);
+
+err0:
+       pm_runtime_disable(dev);
+
+       return ret;
 }
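
The reworked probe above now unwinds in strict reverse order of what has already succeeded: err2 disables the IRQs requested before of_platform_populate() failed, err1 drops the pm_runtime reference, err0 disables runtime PM. This is the usual kernel goto ladder; a tiny stand-alone sketch with invented step names (not driver code) shows why the labels fall through into each other:

#include <stdio.h>

static int step_a(void) { return 0; }	/* think pm_runtime_get_sync() */
static int step_b(void) { return 0; }	/* think request_irq() */
static int step_c(void) { return -1; }	/* think of_platform_populate(), failing */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err0;
	ret = step_b();
	if (ret)
		goto err1;
	ret = step_c();
	if (ret)
		goto err2;
	return 0;

err2:
	undo_b();	/* undo only what already succeeded ... */
err1:
	undo_a();	/* ... in reverse order of setup */
err0:
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}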
 
 static int dwc3_omap_remove(struct platform_device *pdev)
@@ -406,6 +538,9 @@ static const struct of_device_id of_dwc3_match[] = {
        {
                .compatible =   "ti,dwc3"
        },
+       {
+               .compatible =   "ti,am437x-dwc3"
+       },
        { },
 };
 MODULE_DEVICE_TABLE(of, of_dwc3_match);
@@ -431,8 +566,7 @@ static int dwc3_omap_suspend(struct device *dev)
 {
        struct dwc3_omap        *omap = dev_get_drvdata(dev);
 
-       omap->utmi_otg_status = dwc3_omap_readl(omap->base,
-                       USBOTGSS_UTMI_OTG_STATUS);
+       omap->utmi_otg_status = dwc3_omap_read_utmi_status(omap);
 
        return 0;
 }
@@ -441,8 +575,7 @@ static int dwc3_omap_resume(struct device *dev)
 {
        struct dwc3_omap        *omap = dev_get_drvdata(dev);
 
-       dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS,
-                       omap->utmi_otg_status);
+       dwc3_omap_write_utmi_status(omap, omap->utmi_otg_status);
 
        pm_runtime_disable(dev);
        pm_runtime_set_active(dev);
index eba9e2baf32b3d23a78977342286a2a8be5506aa..ed07ec04a9622526b94666acc4f7390bcac56e25 100644 (file)
@@ -133,7 +133,6 @@ static int dwc3_pci_probe(struct pci_dev *pci,
                return -ENODEV;
        }
 
-       pci_set_power_state(pci, PCI_D0);
        pci_set_master(pci);
 
        ret = dwc3_pci_register_phys(glue);
index f41aa0d0c414312163bf2076935fb96adccd7036..01b8229fa86207848c02325b20b8229ea3aa152c 100644 (file)
@@ -192,6 +192,16 @@ config USB_FUSB300
        help
           Faraday usb device controller FUSB300 driver
 
+config USB_FOTG210_UDC
+       tristate "Faraday FOTG210 USB Peripheral Controller"
+       help
+          Faraday USB2.0 OTG controller which can be configured as a
+          high speed or full speed USB device. This driver supports
+          only Bulk Transfer so far.
+
+          Say "y" to link the driver statically, or "m" to build a
+          dynamically linked module called "fotg210_udc".
+
 config USB_OMAP
        tristate "OMAP USB Device Controller"
        depends on ARCH_OMAP1
@@ -334,14 +344,6 @@ config USB_MV_U3D
 # Controllers available in both integrated and discrete versions
 #
 
-# musb builds in ../musb along with host support
-config USB_GADGET_MUSB_HDRC
-       tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)"
-       depends on USB_MUSB_HDRC
-       help
-         This OTG-capable silicon IP is used in dual designs including
-         the TI DaVinci, OMAP 243x, OMAP 343x, TUSB 6010, and ADI Blackfin
-
 config USB_M66592
        tristate "Renesas M66592 USB Peripheral Controller"
        help
@@ -507,12 +509,36 @@ config USB_F_SS_LB
 config USB_U_SERIAL
        tristate
 
+config USB_U_ETHER
+       tristate
+
+config USB_U_RNDIS
+       tristate
+
 config USB_F_SERIAL
        tristate
 
 config USB_F_OBEX
        tristate
 
+config USB_F_NCM
+       tristate
+
+config USB_F_ECM
+       tristate
+
+config USB_F_PHONET
+       tristate
+
+config USB_F_EEM
+       tristate
+
+config USB_F_SUBSET
+       tristate
+
+config USB_F_RNDIS
+       tristate
+
 choice
        tristate "USB Gadget Drivers"
        default USB_ETH
@@ -534,6 +560,121 @@ choice
 
 # this first set of drivers all depend on bulk-capable hardware.
 
+config USB_CONFIGFS
+       tristate "USB functions configurable through configfs"
+       select USB_LIBCOMPOSITE
+       help
+         A Linux USB "gadget" can be set up through configfs.
+         If this is the case, the USB functions (which from the host's
+         perspective are seen as interfaces) and configurations are
+         specified simply by creating appropriate directories in configfs.
+         Associating functions with configurations is done by creating
+         appropriate symbolic links.
+         For more information see Documentation/usb/gadget-configfs.txt.
+
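
The help text above describes the configfs flow only in words: directories create the gadget, its configurations and its functions, and symlinks associate functions with configurations. A minimal user-space sketch of that flow follows; the mount point and the g1/c.1/acm.GS0 names are assumptions picked for the example, and error handling is mostly omitted:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#define GADGET "/sys/kernel/config/usb_gadget/g1"

int main(void)
{
	/* creating directories instantiates the objects ... */
	mkdir(GADGET, 0755);			  /* the gadget itself */
	mkdir(GADGET "/configs/c.1", 0755);	  /* one configuration */
	mkdir(GADGET "/functions/acm.GS0", 0755); /* one ACM function */

	/* ... and a symlink ties a function into a configuration */
	if (symlink(GADGET "/functions/acm.GS0",
		    GADGET "/configs/c.1/acm.GS0") && errno != EEXIST)
		perror("symlink");
	return 0;
}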
+config USB_CONFIGFS_SERIAL
+       boolean "Generic serial bulk in/out"
+       depends on USB_CONFIGFS
+       depends on TTY
+       select USB_U_SERIAL
+       select USB_F_SERIAL
+       help
+         The function talks to the Linux-USB generic serial driver.
+
+config USB_CONFIGFS_ACM
+       boolean "Abstract Control Model (CDC ACM)"
+       depends on USB_CONFIGFS
+       depends on TTY
+       select USB_U_SERIAL
+       select USB_F_ACM
+       help
+         ACM serial link.  This function can be used to interoperate with
+         MS-Windows hosts or with the Linux-USB "cdc-acm" driver.
+
+config USB_CONFIGFS_OBEX
+       boolean "Object Exchange Model (CDC OBEX)"
+       depends on USB_CONFIGFS
+       depends on TTY
+       select USB_U_SERIAL
+       select USB_F_OBEX
+       help
+         You will need a user space OBEX server talking to /dev/ttyGS*,
+         since the kernel itself doesn't implement the OBEX protocol.
+
+config USB_CONFIGFS_NCM
+       boolean "Network Control Model (CDC NCM)"
+       depends on USB_CONFIGFS
+       depends on NET
+       select USB_U_ETHER
+       select USB_F_NCM
+       help
+         NCM is an advanced protocol for Ethernet encapsulation that allows
+         grouping several Ethernet frames into one USB transfer and supports
+         different alignment possibilities.
+
+config USB_CONFIGFS_ECM
+       boolean "Ethernet Control Model (CDC ECM)"
+       depends on USB_CONFIGFS
+       depends on NET
+       select USB_U_ETHER
+       select USB_F_ECM
+       help
+         The "Communication Device Class" (CDC) Ethernet Control Model.
+         That protocol is often avoided with pure Ethernet adapters, in
+         favor of simpler vendor-specific hardware, but is widely
+         supported by firmware for smart network devices.
+
+config USB_CONFIGFS_ECM_SUBSET
+       boolean "Ethernet Control Model (CDC ECM) subset"
+       depends on USB_CONFIGFS
+       depends on NET
+       select USB_U_ETHER
+       select USB_F_SUBSET
+       help
+         On hardware that can't implement the full protocol,
+         a simple CDC subset is used, placing fewer demands on USB.
+
+config USB_CONFIGFS_RNDIS
+       bool "RNDIS"
+       depends on USB_CONFIGFS
+       depends on NET
+       select USB_U_ETHER
+       select USB_F_RNDIS
+       help
+          Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
+          and Microsoft provides redistributable binary RNDIS drivers for
+          older versions of Windows.
+
+          To make MS-Windows work with this, use Documentation/usb/linux.inf
+          as the "driver info file".  For versions of MS-Windows older than
+          XP, you'll need to download drivers from Microsoft's website; a URL
+          is given in comments found in that info file.
+
+config USB_CONFIGFS_EEM
+       bool "Ethernet Emulation Model (EEM)"
+       depends on USB_CONFIGFS
+       depends on NET
+       select USB_U_ETHER
+       select USB_F_EEM
+       help
+         CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
+         and therefore can be supported by more hardware.  Technically ECM and
+         EEM are designed for different applications.  The ECM model extends
+         the network interface to the target (e.g. a USB cable modem), and the
+         EEM model is for mobile devices to communicate with hosts using
+         ethernet over USB.  For Linux gadgets, however, the interface with
+         the host is the same (a usbX device), so the differences are minimal.
+
+config USB_CONFIGFS_PHONET
+       boolean "Phonet protocol"
+       depends on USB_CONFIGFS
+       depends on NET
+       depends on PHONET
+       select USB_U_ETHER
+       select USB_F_PHONET
+       help
+         The Phonet protocol implementation for a USB device.
+
 config USB_ZERO
        tristate "Gadget Zero (DEVELOPMENT)"
        select USB_LIBCOMPOSITE
@@ -603,6 +744,10 @@ config USB_ETH
        tristate "Ethernet Gadget (with CDC Ethernet support)"
        depends on NET
        select USB_LIBCOMPOSITE
+       select USB_U_ETHER
+       select USB_U_RNDIS
+       select USB_F_ECM
+       select USB_F_SUBSET
        select CRC32
        help
          This driver implements Ethernet style communication, in one of
@@ -639,6 +784,7 @@ config USB_ETH_RNDIS
        bool "RNDIS support"
        depends on USB_ETH
        select USB_LIBCOMPOSITE
+       select USB_F_RNDIS
        default y
        help
           Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
@@ -658,6 +804,7 @@ config USB_ETH_EEM
        bool "Ethernet Emulation Model (EEM) support"
        depends on USB_ETH
        select USB_LIBCOMPOSITE
+       select USB_F_EEM
        default n
        help
          CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
@@ -675,6 +822,8 @@ config USB_G_NCM
        tristate "Network Control Model (NCM) support"
        depends on NET
        select USB_LIBCOMPOSITE
+       select USB_U_ETHER
+       select USB_F_NCM
        select CRC32
        help
          This driver implements USB CDC NCM subclass standard. NCM is
@@ -718,6 +867,7 @@ config USB_FUNCTIONFS
 config USB_FUNCTIONFS_ETH
        bool "Include configuration with CDC ECM (Ethernet)"
        depends on USB_FUNCTIONFS && NET
+       select USB_U_ETHER
        help
          Include a configuration with CDC ECM function (Ethernet) and the
          Function Filesystem.
@@ -725,6 +875,8 @@ config USB_FUNCTIONFS_ETH
 config USB_FUNCTIONFS_RNDIS
        bool "Include configuration with RNDIS (Ethernet)"
        depends on USB_FUNCTIONFS && NET
+       select USB_U_ETHER
+       select USB_U_RNDIS
        help
          Include a configuration with RNDIS function (Ethernet) and the Filesystem.
 
@@ -825,7 +977,9 @@ config USB_CDC_COMPOSITE
        depends on NET
        select USB_LIBCOMPOSITE
        select USB_U_SERIAL
+       select USB_U_ETHER
        select USB_F_ACM
+       select USB_F_ECM
        help
          This driver provides two functions in one configuration:
          a CDC Ethernet (ECM) link, and a CDC ACM (serial port) link.
@@ -842,7 +996,11 @@ config USB_G_NOKIA
        depends on PHONET
        select USB_LIBCOMPOSITE
        select USB_U_SERIAL
+       select USB_U_ETHER
        select USB_F_ACM
+       select USB_F_OBEX
+       select USB_F_PHONET
+       select USB_F_ECM
        help
          The Nokia composite gadget provides support for acm, obex
          and phonet in only one composite gadget driver.
@@ -869,6 +1027,8 @@ config USB_G_MULTI
        select USB_G_MULTI_CDC if !USB_G_MULTI_RNDIS
        select USB_LIBCOMPOSITE
        select USB_U_SERIAL
+       select USB_U_ETHER
+       select USB_U_RNDIS
        select USB_F_ACM
        help
          The Multifunction Composite Gadget provides Ethernet (RNDIS
index 6afd16659e78d719c8fe7a5bf3702fa40329eba4..bad08e66f3697abacb3327eb3f197f61ad388ad7 100644 (file)
@@ -33,6 +33,7 @@ obj-$(CONFIG_USB_EG20T)               += pch_udc.o
 obj-$(CONFIG_USB_MV_UDC)       += mv_udc.o
 mv_udc-y                       := mv_udc_core.o
 obj-$(CONFIG_USB_FUSB300)      += fusb300_udc.o
+obj-$(CONFIG_USB_FOTG210_UDC)  += fotg210-udc.o
 obj-$(CONFIG_USB_MV_U3D)       += mv_u3d_core.o
 
 # USB Functions
@@ -45,6 +46,21 @@ usb_f_serial-y                       := f_serial.o
 obj-$(CONFIG_USB_F_SERIAL)     += usb_f_serial.o
 usb_f_obex-y                   := f_obex.o
 obj-$(CONFIG_USB_F_OBEX)       += usb_f_obex.o
+obj-$(CONFIG_USB_U_ETHER)      += u_ether.o
+u_rndis-y                      := rndis.o
+obj-$(CONFIG_USB_U_RNDIS)      += u_rndis.o
+usb_f_ncm-y                    := f_ncm.o
+obj-$(CONFIG_USB_F_NCM)                += usb_f_ncm.o
+usb_f_ecm-y                    := f_ecm.o
+obj-$(CONFIG_USB_F_ECM)                += usb_f_ecm.o
+usb_f_phonet-y                 := f_phonet.o
+obj-$(CONFIG_USB_F_PHONET)     += usb_f_phonet.o
+usb_f_eem-y                    := f_eem.o
+obj-$(CONFIG_USB_F_EEM)                += usb_f_eem.o
+usb_f_ecm_subset-y             := f_subset.o
+obj-$(CONFIG_USB_F_SUBSET)     += usb_f_ecm_subset.o
+usb_f_rndis-y                  := f_rndis.o
+obj-$(CONFIG_USB_F_RNDIS)      += usb_f_rndis.o
 
 #
 # USB gadget drivers
index 2c52551827692c5bd119bf78513e32e3f0fd7116..5a5acf22c694514f5b3ebede7903f9912e4138b5 100644 (file)
@@ -15,6 +15,7 @@
 
 #include "u_ether.h"
 #include "u_serial.h"
+#include "u_ecm.h"
 
 
 #define DRIVER_DESC            "CDC Composite Gadget"
 #define CDC_VENDOR_NUM         0x0525  /* NetChip */
 #define CDC_PRODUCT_NUM                0xa4aa  /* CDC Composite: ECM + ACM */
 
-/*-------------------------------------------------------------------------*/
 USB_GADGET_COMPOSITE_OPTIONS();
 
-/*
- * Kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module.  So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
-#include "f_ecm.c"
-#include "u_ether.c"
+USB_ETHERNET_MODULE_PARAMETERS();
 
 /*-------------------------------------------------------------------------*/
 
@@ -102,12 +94,13 @@ static struct usb_gadget_strings *dev_strings[] = {
        NULL,
 };
 
-static u8 hostaddr[ETH_ALEN];
-static struct eth_dev *the_dev;
 /*-------------------------------------------------------------------------*/
 static struct usb_function *f_acm;
 static struct usb_function_instance *fi_serial;
 
+static struct usb_function *f_ecm;
+static struct usb_function_instance *fi_ecm;
+
 /*
  * We _always_ have both CDC ECM and CDC ACM functions.
  */
@@ -120,13 +113,27 @@ static int __init cdc_do_config(struct usb_configuration *c)
                c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
        }
 
-       status = ecm_bind_config(c, hostaddr, the_dev);
-       if (status < 0)
-               return status;
+       fi_ecm = usb_get_function_instance("ecm");
+       if (IS_ERR(fi_ecm)) {
+               status = PTR_ERR(fi_ecm);
+               goto err_func_ecm;
+       }
+
+       f_ecm = usb_get_function(fi_ecm);
+       if (IS_ERR(f_ecm)) {
+               status = PTR_ERR(f_ecm);
+               goto err_get_ecm;
+       }
+
+       status = usb_add_function(c, f_ecm);
+       if (status)
+               goto err_add_ecm;
 
        fi_serial = usb_get_function_instance("acm");
-       if (IS_ERR(fi_serial))
-               return PTR_ERR(fi_serial);
+       if (IS_ERR(fi_serial)) {
+               status = PTR_ERR(fi_serial);
+               goto err_get_acm;
+       }
 
        f_acm = usb_get_function(fi_serial);
        if (IS_ERR(f_acm)) {
@@ -136,12 +143,21 @@ static int __init cdc_do_config(struct usb_configuration *c)
 
        status = usb_add_function(c, f_acm);
        if (status)
-               goto err_conf;
+               goto err_add_acm;
+
        return 0;
-err_conf:
+
+err_add_acm:
        usb_put_function(f_acm);
 err_func_acm:
        usb_put_function_instance(fi_serial);
+err_get_acm:
+       usb_remove_function(c, f_ecm);
+err_add_ecm:
+       usb_put_function(f_ecm);
+err_get_ecm:
+       usb_put_function_instance(fi_ecm);
+err_func_ecm:
        return status;
 }
 
@@ -157,6 +173,7 @@ static struct usb_configuration cdc_config_driver = {
 static int __init cdc_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget       *gadget = cdev->gadget;
+       struct f_ecm_opts       *ecm_opts;
        int                     status;
 
        if (!can_support_ecm(cdev->gadget)) {
@@ -165,10 +182,23 @@ static int __init cdc_bind(struct usb_composite_dev *cdev)
                return -EINVAL;
        }
 
-       /* set up network link layer */
-       the_dev = gether_setup(cdev->gadget, hostaddr);
-       if (IS_ERR(the_dev))
-               return PTR_ERR(the_dev);
+       fi_ecm = usb_get_function_instance("ecm");
+       if (IS_ERR(fi_ecm))
+               return PTR_ERR(fi_ecm);
+
+       ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);
+
+       gether_set_qmult(ecm_opts->net, qmult);
+       if (!gether_set_host_addr(ecm_opts->net, host_addr))
+               pr_info("using host ethernet address: %s", host_addr);
+       if (!gether_set_dev_addr(ecm_opts->net, dev_addr))
+               pr_info("using self ethernet address: %s", dev_addr);
+
+       fi_serial = usb_get_function_instance("acm");
+       if (IS_ERR(fi_serial)) {
+               status = PTR_ERR(fi_serial);
+               goto fail;
+       }
 
        /* Allocate string descriptor numbers ... note that string
         * contents can be overridden by the composite_dev glue.
@@ -192,7 +222,9 @@ static int __init cdc_bind(struct usb_composite_dev *cdev)
        return 0;
 
 fail1:
-       gether_cleanup(the_dev);
+       usb_put_function_instance(fi_serial);
+fail:
+       usb_put_function_instance(fi_ecm);
        return status;
 }
 
@@ -200,7 +232,10 @@ static int __exit cdc_unbind(struct usb_composite_dev *cdev)
 {
        usb_put_function(f_acm);
        usb_put_function_instance(fi_serial);
-       gether_cleanup(the_dev);
+       if (!IS_ERR_OR_NULL(f_ecm))
+               usb_put_function(f_ecm);
+       if (!IS_ERR_OR_NULL(fi_ecm))
+               usb_put_function_instance(fi_ecm);
        return 0;
 }
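
Both legacy gadgets converted in this series follow the same shape when a configuration picks up a function: look the instance up by name, turn it into a struct usb_function, add it to the configuration, and put things back in reverse order if anything fails. The fragment below is only a schematic restatement of that shape in kernel context (it is not a stand-alone program); the calls are the ones visible in the diff above, and "ecm" is the name registered via DECLARE_USB_FUNCTION_INIT later in the series:

static int example_do_config(struct usb_configuration *c)
{
	struct usb_function_instance *fi;
	struct usb_function *f;
	int status;

	fi = usb_get_function_instance("ecm");	/* look up by registered name */
	if (IS_ERR(fi))
		return PTR_ERR(fi);

	f = usb_get_function(fi);
	if (IS_ERR(f)) {
		status = PTR_ERR(f);
		goto put_instance;
	}

	status = usb_add_function(c, f);
	if (status)
		goto put_function;
	return 0;

put_function:
	usb_put_function(f);
put_instance:
	usb_put_function_instance(fi);
	return status;
}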
 
index 56c8ecae9bc3213781b762e697ad626c23063e59..f48712ffe261470487f7be0476ad7d89d8a808cf 100644 (file)
@@ -14,6 +14,7 @@
 /* #define VERBOSE_DEBUG */
 
 #include <linux/kernel.h>
+#include <linux/netdevice.h>
 
 #if defined USB_ETH_RNDIS
 #  undef USB_ETH_RNDIS
@@ -91,27 +92,23 @@ static inline bool has_rndis(void)
 #endif
 }
 
-/*-------------------------------------------------------------------------*/
+#include <linux/module.h>
 
-/*
- * Kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module.  So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
-#include "f_ecm.c"
-#include "f_subset.c"
+#include "u_ecm.h"
+#include "u_gether.h"
 #ifdef USB_ETH_RNDIS
-#include "f_rndis.c"
-#include "rndis.c"
+#include "u_rndis.h"
+#include "rndis.h"
+#else
+#define rndis_borrow_net(...) do {} while (0)
 #endif
-#include "f_eem.c"
-#include "u_ether.c"
+#include "u_eem.h"
 
 /*-------------------------------------------------------------------------*/
 USB_GADGET_COMPOSITE_OPTIONS();
 
+USB_ETHERNET_MODULE_PARAMETERS();
+
 /* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
  * Instead:  allocate your own, using normal USB-IF procedures.
  */
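
A few lines up, ether.c stubs out rndis_borrow_net() with a do {} while (0) macro when RNDIS is compiled out, so every call site stays free of #ifdef clutter. A tiny stand-alone illustration of that compile-out pattern (the feature name is invented for the example):

#include <stdio.h>

#define HAVE_FEATURE 1	/* flip to 0 to compile the feature out */

#if HAVE_FEATURE
static void feature_setup(int arg)
{
	printf("feature set up with %d\n", arg);
}
#else
/* swallows its arguments; callers need no #ifdef of their own */
#define feature_setup(...) do {} while (0)
#endif

int main(void)
{
	feature_setup(42);	/* compiles and links either way */
	return 0;
}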
@@ -206,8 +203,18 @@ static struct usb_gadget_strings *dev_strings[] = {
        NULL,
 };
 
-static u8 hostaddr[ETH_ALEN];
-static struct eth_dev *the_dev;
+static struct usb_function_instance *fi_ecm;
+static struct usb_function *f_ecm;
+
+static struct usb_function_instance *fi_eem;
+static struct usb_function *f_eem;
+
+static struct usb_function_instance *fi_geth;
+static struct usb_function *f_geth;
+
+static struct usb_function_instance *fi_rndis;
+static struct usb_function *f_rndis;
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -217,6 +224,8 @@ static struct eth_dev *the_dev;
  */
 static int __init rndis_do_config(struct usb_configuration *c)
 {
+       int status;
+
        /* FIXME alloc iConfiguration string, set it in c->strings */
 
        if (gadget_is_otg(c->cdev->gadget)) {
@@ -224,7 +233,15 @@ static int __init rndis_do_config(struct usb_configuration *c)
                c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
        }
 
-       return rndis_bind_config(c, hostaddr, the_dev);
+       f_rndis = usb_get_function(fi_rndis);
+       if (IS_ERR(f_rndis))
+               return PTR_ERR(f_rndis);
+
+       status = usb_add_function(c, f_rndis);
+       if (status < 0)
+               usb_put_function(f_rndis);
+
+       return status;
 }
 
 static struct usb_configuration rndis_config_driver = {
@@ -249,6 +266,8 @@ MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
  */
 static int __init eth_do_config(struct usb_configuration *c)
 {
+       int status = 0;
+
        /* FIXME alloc iConfiguration string, set it in c->strings */
 
        if (gadget_is_otg(c->cdev->gadget)) {
@@ -256,12 +275,38 @@ static int __init eth_do_config(struct usb_configuration *c)
                c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
        }
 
-       if (use_eem)
-               return eem_bind_config(c, the_dev);
-       else if (can_support_ecm(c->cdev->gadget))
-               return ecm_bind_config(c, hostaddr, the_dev);
-       else
-               return geth_bind_config(c, hostaddr, the_dev);
+       if (use_eem) {
+               f_eem = usb_get_function(fi_eem);
+               if (IS_ERR(f_eem))
+                       return PTR_ERR(f_eem);
+
+               status = usb_add_function(c, f_eem);
+               if (status < 0)
+                       usb_put_function(f_eem);
+
+               return status;
+       } else if (can_support_ecm(c->cdev->gadget)) {
+               f_ecm = usb_get_function(fi_ecm);
+               if (IS_ERR(f_ecm))
+                       return PTR_ERR(f_ecm);
+
+               status = usb_add_function(c, f_ecm);
+               if (status < 0)
+                       usb_put_function(f_ecm);
+
+               return status;
+       } else {
+               f_geth = usb_get_function(fi_geth);
+               if (IS_ERR(f_geth))
+                       return PTR_ERR(f_geth);
+
+               status = usb_add_function(c, f_geth);
+               if (status < 0)
+                       usb_put_function(f_geth);
+
+               return status;
+       }
 }
 
 static struct usb_configuration eth_config_driver = {
@@ -276,24 +321,50 @@ static struct usb_configuration eth_config_driver = {
 static int __init eth_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget       *gadget = cdev->gadget;
+       struct f_eem_opts       *eem_opts = NULL;
+       struct f_ecm_opts       *ecm_opts = NULL;
+       struct f_gether_opts    *geth_opts = NULL;
+       struct net_device       *net;
        int                     status;
 
-       /* set up network link layer */
-       the_dev = gether_setup(cdev->gadget, hostaddr);
-       if (IS_ERR(the_dev))
-               return PTR_ERR(the_dev);
-
        /* set up main config label and device descriptor */
        if (use_eem) {
                /* EEM */
+               fi_eem = usb_get_function_instance("eem");
+               if (IS_ERR(fi_eem))
+                       return PTR_ERR(fi_eem);
+
+               eem_opts = container_of(fi_eem, struct f_eem_opts, func_inst);
+
+               net = eem_opts->net;
+
                eth_config_driver.label = "CDC Ethernet (EEM)";
                device_desc.idVendor = cpu_to_le16(EEM_VENDOR_NUM);
                device_desc.idProduct = cpu_to_le16(EEM_PRODUCT_NUM);
-       } else if (can_support_ecm(cdev->gadget)) {
+       } else if (can_support_ecm(gadget)) {
                /* ECM */
+
+               fi_ecm = usb_get_function_instance("ecm");
+               if (IS_ERR(fi_ecm))
+                       return PTR_ERR(fi_ecm);
+
+               ecm_opts = container_of(fi_ecm, struct f_ecm_opts, func_inst);
+
+               net = ecm_opts->net;
+
                eth_config_driver.label = "CDC Ethernet (ECM)";
        } else {
                /* CDC Subset */
+
+               fi_geth = usb_get_function_instance("geth");
+               if (IS_ERR(fi_geth))
+                       return PTR_ERR(fi_geth);
+
+               geth_opts = container_of(fi_geth, struct f_gether_opts,
+                                        func_inst);
+
+               net = geth_opts->net;
+
                eth_config_driver.label = "CDC Subset/SAFE";
 
                device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM);
@@ -302,8 +373,34 @@ static int __init eth_bind(struct usb_composite_dev *cdev)
                        device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
        }
 
+       gether_set_qmult(net, qmult);
+       if (!gether_set_host_addr(net, host_addr))
+               pr_info("using host ethernet address: %s", host_addr);
+       if (!gether_set_dev_addr(net, dev_addr))
+               pr_info("using self ethernet address: %s", dev_addr);
+
        if (has_rndis()) {
                /* RNDIS plus ECM-or-Subset */
+               gether_set_gadget(net, cdev->gadget);
+               status = gether_register_netdev(net);
+               if (status)
+                       goto fail;
+
+               if (use_eem)
+                       eem_opts->bound = true;
+               else if (can_support_ecm(gadget))
+                       ecm_opts->bound = true;
+               else
+                       geth_opts->bound = true;
+
+               fi_rndis = usb_get_function_instance("rndis");
+               if (IS_ERR(fi_rndis)) {
+                       status = PTR_ERR(fi_rndis);
+                       goto fail;
+               }
+
+               rndis_borrow_net(fi_rndis, net);
+
                device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM);
                device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM);
                device_desc.bNumConfigurations = 2;
@@ -315,7 +412,7 @@ static int __init eth_bind(struct usb_composite_dev *cdev)
 
        status = usb_string_ids_tab(cdev, strings_dev);
        if (status < 0)
-               goto fail;
+               goto fail1;
        device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
        device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
@@ -324,12 +421,12 @@ static int __init eth_bind(struct usb_composite_dev *cdev)
                status = usb_add_config(cdev, &rndis_config_driver,
                                rndis_do_config);
                if (status < 0)
-                       goto fail;
+                       goto fail1;
        }
 
        status = usb_add_config(cdev, &eth_config_driver, eth_do_config);
        if (status < 0)
-               goto fail;
+               goto fail1;
 
        usb_composite_overwrite_options(cdev, &coverwrite);
        dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
@@ -337,14 +434,29 @@ static int __init eth_bind(struct usb_composite_dev *cdev)
 
        return 0;
 
+fail1:
+       if (has_rndis())
+               usb_put_function_instance(fi_rndis);
 fail:
-       gether_cleanup(the_dev);
+       if (use_eem)
+               usb_put_function_instance(fi_eem);
+       else if (can_support_ecm(gadget))
+               usb_put_function_instance(fi_ecm);
+       else
+               usb_put_function_instance(fi_geth);
        return status;
 }
 
 static int __exit eth_unbind(struct usb_composite_dev *cdev)
 {
-       gether_cleanup(the_dev);
+       if (has_rndis())
+               usb_put_function_instance(fi_rndis);
+       if (use_eem)
+               usb_put_function_instance(fi_eem);
+       else if (can_support_ecm(cdev->gadget))
+               usb_put_function_instance(fi_ecm);
+       else
+               usb_put_function_instance(fi_geth);
        return 0;
 }
 
index abf8a31ae146028a89130835d600b4a7dfa00486..5d3561ea1c1595ea30f07bc3ad35253647701d97 100644 (file)
 
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/etherdevice.h>
 
 #include "u_ether.h"
+#include "u_ether_configfs.h"
+#include "u_ecm.h"
 
 
 /*
@@ -684,9 +687,44 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
 {
        struct usb_composite_dev *cdev = c->cdev;
        struct f_ecm            *ecm = func_to_ecm(f);
+       struct usb_string       *us;
        int                     status;
        struct usb_ep           *ep;
 
+#ifndef USBF_ECM_INCLUDED
+       struct f_ecm_opts       *ecm_opts;
+
+       if (!can_support_ecm(cdev->gadget))
+               return -EINVAL;
+
+       ecm_opts = container_of(f->fi, struct f_ecm_opts, func_inst);
+
+       /*
+        * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+        * configurations are bound in sequence with list_for_each_entry,
+        * in each configuration its functions are bound in sequence
+        * with list_for_each_entry, so we assume no race condition
+        * with regard to ecm_opts->bound access
+        */
+       if (!ecm_opts->bound) {
+               mutex_lock(&ecm_opts->lock);
+               gether_set_gadget(ecm_opts->net, cdev->gadget);
+               status = gether_register_netdev(ecm_opts->net);
+               mutex_unlock(&ecm_opts->lock);
+               if (status)
+                       return status;
+               ecm_opts->bound = true;
+       }
+#endif
+       us = usb_gstrings_attach(cdev, ecm_strings,
+                                ARRAY_SIZE(ecm_string_defs));
+       if (IS_ERR(us))
+               return PTR_ERR(us);
+       ecm_control_intf.iInterface = us[0].id;
+       ecm_data_intf.iInterface = us[2].id;
+       ecm_desc.iMACAddress = us[1].id;
+       ecm_iad_descriptor.iFunction = us[3].id;
+
        /* allocate instance-specific interface IDs */
        status = usb_interface_id(c, f);
        if (status < 0)
@@ -796,14 +834,15 @@ fail:
        return status;
 }
 
+#ifdef USBF_ECM_INCLUDED
+
 static void
-ecm_unbind(struct usb_configuration *c, struct usb_function *f)
+ecm_old_unbind(struct usb_configuration *c, struct usb_function *f)
 {
        struct f_ecm            *ecm = func_to_ecm(f);
 
        DBG(c->cdev, "ecm unbind\n");
 
-       ecm_string_defs[0].id = 0;
        usb_free_all_descriptors(f);
 
        kfree(ecm->notify_req->buf);
@@ -834,17 +873,6 @@ ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
                return -EINVAL;
 
-       if (ecm_string_defs[0].id == 0) {
-               status = usb_string_ids_tab(c->cdev, ecm_string_defs);
-               if (status)
-                       return status;
-
-               ecm_control_intf.iInterface = ecm_string_defs[0].id;
-               ecm_data_intf.iInterface = ecm_string_defs[2].id;
-               ecm_desc.iMACAddress = ecm_string_defs[1].id;
-               ecm_iad_descriptor.iFunction = ecm_string_defs[3].id;
-       }
-
        /* allocate and initialize one new instance */
        ecm = kzalloc(sizeof *ecm, GFP_KERNEL);
        if (!ecm)
@@ -858,10 +886,9 @@ ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        ecm->port.cdc_filter = DEFAULT_FILTER;
 
        ecm->port.func.name = "cdc_ethernet";
-       ecm->port.func.strings = ecm_strings;
        /* descriptors are per-instance copies */
        ecm->port.func.bind = ecm_bind;
-       ecm->port.func.unbind = ecm_unbind;
+       ecm->port.func.unbind = ecm_old_unbind;
        ecm->port.func.set_alt = ecm_set_alt;
        ecm->port.func.get_alt = ecm_get_alt;
        ecm->port.func.setup = ecm_setup;
@@ -872,3 +899,143 @@ ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
                kfree(ecm);
        return status;
 }
+
+#else
+
+static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item)
+{
+       return container_of(to_config_group(item), struct f_ecm_opts,
+                           func_inst.group);
+}
+
+/* f_ecm_item_ops */
+USB_ETHERNET_CONFIGFS_ITEM(ecm);
+
+/* f_ecm_opts_dev_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ecm);
+
+/* f_ecm_opts_host_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ecm);
+
+/* f_ecm_opts_qmult */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ecm);
+
+/* f_ecm_opts_ifname */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ecm);
+
+static struct configfs_attribute *ecm_attrs[] = {
+       &f_ecm_opts_dev_addr.attr,
+       &f_ecm_opts_host_addr.attr,
+       &f_ecm_opts_qmult.attr,
+       &f_ecm_opts_ifname.attr,
+       NULL,
+};
+
+static struct config_item_type ecm_func_type = {
+       .ct_item_ops    = &ecm_item_ops,
+       .ct_attrs       = ecm_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+static void ecm_free_inst(struct usb_function_instance *f)
+{
+       struct f_ecm_opts *opts;
+
+       opts = container_of(f, struct f_ecm_opts, func_inst);
+       if (opts->bound)
+               gether_cleanup(netdev_priv(opts->net));
+       else
+               free_netdev(opts->net);
+       kfree(opts);
+}
+
+static struct usb_function_instance *ecm_alloc_inst(void)
+{
+       struct f_ecm_opts *opts;
+
+       opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+       if (!opts)
+               return ERR_PTR(-ENOMEM);
+       mutex_init(&opts->lock);
+       opts->func_inst.free_func_inst = ecm_free_inst;
+       opts->net = gether_setup_default();
+       if (IS_ERR(opts->net))
+               return ERR_CAST(opts->net);
+
+       config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type);
+
+       return &opts->func_inst;
+}
+
+static void ecm_free(struct usb_function *f)
+{
+       struct f_ecm *ecm;
+       struct f_ecm_opts *opts;
+
+       ecm = func_to_ecm(f);
+       opts = container_of(f->fi, struct f_ecm_opts, func_inst);
+       kfree(ecm);
+       mutex_lock(&opts->lock);
+       opts->refcnt--;
+       mutex_unlock(&opts->lock);
+}
+
+static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct f_ecm            *ecm = func_to_ecm(f);
+
+       DBG(c->cdev, "ecm unbind\n");
+
+       usb_free_all_descriptors(f);
+
+       kfree(ecm->notify_req->buf);
+       usb_ep_free_request(ecm->notify, ecm->notify_req);
+}
+
+struct usb_function *ecm_alloc(struct usb_function_instance *fi)
+{
+       struct f_ecm    *ecm;
+       struct f_ecm_opts *opts;
+       int status;
+
+       /* allocate and initialize one new instance */
+       ecm = kzalloc(sizeof(*ecm), GFP_KERNEL);
+       if (!ecm)
+               return ERR_PTR(-ENOMEM);
+
+       opts = container_of(fi, struct f_ecm_opts, func_inst);
+       mutex_lock(&opts->lock);
+       opts->refcnt++;
+
+       /* export host's Ethernet address in CDC format */
+       status = gether_get_host_addr_cdc(opts->net, ecm->ethaddr,
+                                         sizeof(ecm->ethaddr));
+       if (status < 12) {
+               kfree(ecm);
+               mutex_unlock(&opts->lock);
+               return ERR_PTR(-EINVAL);
+       }
+       ecm_string_defs[1].s = ecm->ethaddr;
+
+       ecm->port.ioport = netdev_priv(opts->net);
+       mutex_unlock(&opts->lock);
+       ecm->port.cdc_filter = DEFAULT_FILTER;
+
+       ecm->port.func.name = "cdc_ethernet";
+       /* descriptors are per-instance copies */
+       ecm->port.func.bind = ecm_bind;
+       ecm->port.func.unbind = ecm_unbind;
+       ecm->port.func.set_alt = ecm_set_alt;
+       ecm->port.func.get_alt = ecm_get_alt;
+       ecm->port.func.setup = ecm_setup;
+       ecm->port.func.disable = ecm_disable;
+       ecm->port.func.free_func = ecm_free;
+
+       return &ecm->port.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(ecm, ecm_alloc_inst, ecm_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Brownell");
+
+#endif
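
With the configfs conversion above, every ECM function instance exposes dev_addr, host_addr, qmult and ifname attributes (created by the USB_ETHERNET_CONFIGFS_ITEM_ATTR_* macros). A small user-space sketch of tuning them before binding the gadget follows; the directory layout is an assumption based on the usual usb_gadget configfs hierarchy, and the ecm.usb0 instance name is made up for the example:

#include <stdio.h>

/* hypothetical path: gadget "g1" with an "ecm.usb0" function instance */
#define ECM_FUNC "/sys/kernel/config/usb_gadget/g1/functions/ecm.usb0"

static int write_attr(const char *attr, const char *value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), ECM_FUNC "/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(value, f);
	return fclose(f);
}

int main(void)
{
	/* attribute names match the configfs_attribute array added above */
	write_attr("host_addr", "aa:bb:cc:dd:ee:f0");
	write_attr("dev_addr",  "aa:bb:cc:dd:ee:f1");
	write_attr("qmult",     "5");
	return 0;
}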
index f4e0bbef602a3ed05c2df67b06d55884dfaf2161..90ee8022e8d80c29fbff9a1298d62c13ec869b42 100644 (file)
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/etherdevice.h>
 #include <linux/crc32.h>
 #include <linux/slab.h>
 
 #include "u_ether.h"
+#include "u_ether_configfs.h"
+#include "u_eem.h"
 
 #define EEM_HLEN 2
 
@@ -40,7 +43,7 @@ static inline struct f_eem *func_to_eem(struct usb_function *f)
 
 /* interface descriptor: */
 
-static struct usb_interface_descriptor eem_intf __initdata = {
+static struct usb_interface_descriptor eem_intf = {
        .bLength =              sizeof eem_intf,
        .bDescriptorType =      USB_DT_INTERFACE,
 
@@ -54,7 +57,7 @@ static struct usb_interface_descriptor eem_intf __initdata = {
 
 /* full speed support: */
 
-static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = {
+static struct usb_endpoint_descriptor eem_fs_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -62,7 +65,7 @@ static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = {
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
 };
 
-static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = {
+static struct usb_endpoint_descriptor eem_fs_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -70,7 +73,7 @@ static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = {
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
 };
 
-static struct usb_descriptor_header *eem_fs_function[] __initdata = {
+static struct usb_descriptor_header *eem_fs_function[] = {
        /* CDC EEM control descriptors */
        (struct usb_descriptor_header *) &eem_intf,
        (struct usb_descriptor_header *) &eem_fs_in_desc,
@@ -80,7 +83,7 @@ static struct usb_descriptor_header *eem_fs_function[] __initdata = {
 
 /* high speed support: */
 
-static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = {
+static struct usb_endpoint_descriptor eem_hs_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -89,7 +92,7 @@ static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = {
        .wMaxPacketSize =       cpu_to_le16(512),
 };
 
-static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = {
+static struct usb_endpoint_descriptor eem_hs_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -98,7 +101,7 @@ static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = {
        .wMaxPacketSize =       cpu_to_le16(512),
 };
 
-static struct usb_descriptor_header *eem_hs_function[] __initdata = {
+static struct usb_descriptor_header *eem_hs_function[] = {
        /* CDC EEM control descriptors */
        (struct usb_descriptor_header *) &eem_intf,
        (struct usb_descriptor_header *) &eem_hs_in_desc,
@@ -108,7 +111,7 @@ static struct usb_descriptor_header *eem_hs_function[] __initdata = {
 
 /* super speed support: */
 
-static struct usb_endpoint_descriptor eem_ss_in_desc __initdata = {
+static struct usb_endpoint_descriptor eem_ss_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -117,7 +120,7 @@ static struct usb_endpoint_descriptor eem_ss_in_desc __initdata = {
        .wMaxPacketSize =       cpu_to_le16(1024),
 };
 
-static struct usb_endpoint_descriptor eem_ss_out_desc __initdata = {
+static struct usb_endpoint_descriptor eem_ss_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -126,7 +129,7 @@ static struct usb_endpoint_descriptor eem_ss_out_desc __initdata = {
        .wMaxPacketSize =       cpu_to_le16(1024),
 };
 
-static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc __initdata = {
+static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc = {
        .bLength =              sizeof eem_ss_bulk_comp_desc,
        .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
 
@@ -135,7 +138,7 @@ static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc __initdata = {
        /* .bmAttributes =      0, */
 };
 
-static struct usb_descriptor_header *eem_ss_function[] __initdata = {
+static struct usb_descriptor_header *eem_ss_function[] = {
        /* CDC EEM control descriptors */
        (struct usb_descriptor_header *) &eem_intf,
        (struct usb_descriptor_header *) &eem_ss_in_desc,
@@ -242,14 +245,40 @@ static void eem_disable(struct usb_function *f)
 
 /* EEM function driver setup/binding */
 
-static int __init
-eem_bind(struct usb_configuration *c, struct usb_function *f)
+static int eem_bind(struct usb_configuration *c, struct usb_function *f)
 {
        struct usb_composite_dev *cdev = c->cdev;
        struct f_eem            *eem = func_to_eem(f);
+       struct usb_string       *us;
        int                     status;
        struct usb_ep           *ep;
 
+       struct f_eem_opts       *eem_opts;
+
+       eem_opts = container_of(f->fi, struct f_eem_opts, func_inst);
+       /*
+        * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+        * configurations are bound in sequence with list_for_each_entry,
+        * in each configuration its functions are bound in sequence
+        * with list_for_each_entry, so we assume no race condition
+        * with regard to eem_opts->bound access
+        */
+       if (!eem_opts->bound) {
+               mutex_lock(&eem_opts->lock);
+               gether_set_gadget(eem_opts->net, cdev->gadget);
+               status = gether_register_netdev(eem_opts->net);
+               mutex_unlock(&eem_opts->lock);
+               if (status)
+                       return status;
+               eem_opts->bound = true;
+       }
+
+       us = usb_gstrings_attach(cdev, eem_strings,
+                                ARRAY_SIZE(eem_string_defs));
+       if (IS_ERR(us))
+               return PTR_ERR(us);
+       eem_intf.iInterface = us[0].id;
+
        /* allocate instance-specific interface IDs */
        status = usb_interface_id(c, f);
        if (status < 0)
@@ -307,17 +336,6 @@ fail:
        return status;
 }
 
-static void
-eem_unbind(struct usb_configuration *c, struct usb_function *f)
-{
-       struct f_eem    *eem = func_to_eem(f);
-
-       DBG(c->cdev, "eem unbind\n");
-
-       usb_free_all_descriptors(f);
-       kfree(eem);
-}
-
 static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
 {
        struct sk_buff *skb = (struct sk_buff *)req->context;
@@ -518,55 +536,124 @@ error:
        return status;
 }
 
-/**
- * eem_bind_config - add CDC Ethernet (EEM) network link to a configuration
- * @c: the configuration to support the network link
- * Context: single threaded during gadget setup
- *
- * Returns zero on success, else negative errno.
- *
- * Caller must have called @gether_setup().  Caller is also responsible
- * for calling @gether_cleanup() before module unload.
- */
-int __init eem_bind_config(struct usb_configuration *c, struct eth_dev *dev)
+static inline struct f_eem_opts *to_f_eem_opts(struct config_item *item)
 {
-       struct f_eem    *eem;
-       int             status;
+       return container_of(to_config_group(item), struct f_eem_opts,
+                           func_inst.group);
+}
 
-       /* maybe allocate device-global string IDs */
-       if (eem_string_defs[0].id == 0) {
+/* f_eem_item_ops */
+USB_ETHERNET_CONFIGFS_ITEM(eem);
 
-               /* control interface label */
-               status = usb_string_id(c->cdev);
-               if (status < 0)
-                       return status;
-               eem_string_defs[0].id = status;
-               eem_intf.iInterface = status;
-       }
+/* f_eem_opts_dev_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(eem);
+
+/* f_eem_opts_host_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(eem);
+
+/* f_eem_opts_qmult */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(eem);
+
+/* f_eem_opts_ifname */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(eem);
+
+static struct configfs_attribute *eem_attrs[] = {
+       &f_eem_opts_dev_addr.attr,
+       &f_eem_opts_host_addr.attr,
+       &f_eem_opts_qmult.attr,
+       &f_eem_opts_ifname.attr,
+       NULL,
+};
+
+static struct config_item_type eem_func_type = {
+       .ct_item_ops    = &eem_item_ops,
+       .ct_attrs       = eem_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+static void eem_free_inst(struct usb_function_instance *f)
+{
+       struct f_eem_opts *opts;
+
+       opts = container_of(f, struct f_eem_opts, func_inst);
+       if (opts->bound)
+               gether_cleanup(netdev_priv(opts->net));
+       else
+               free_netdev(opts->net);
+       kfree(opts);
+}
+
+static struct usb_function_instance *eem_alloc_inst(void)
+{
+       struct f_eem_opts *opts;
+
+       opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+       if (!opts)
+               return ERR_PTR(-ENOMEM);
+       mutex_init(&opts->lock);
+       opts->func_inst.free_func_inst = eem_free_inst;
+       opts->net = gether_setup_default();
+       if (IS_ERR(opts->net))
+               return ERR_CAST(opts->net);
+
+       config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type);
+
+       return &opts->func_inst;
+}
+
+static void eem_free(struct usb_function *f)
+{
+       struct f_eem *eem;
+       struct f_eem_opts *opts;
+
+       eem = func_to_eem(f);
+       opts = container_of(f->fi, struct f_eem_opts, func_inst);
+       kfree(eem);
+       mutex_lock(&opts->lock);
+       opts->refcnt--;
+       mutex_unlock(&opts->lock);
+}
+
+static void eem_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       DBG(c->cdev, "eem unbind\n");
+
+       usb_free_all_descriptors(f);
+}
+
+struct usb_function *eem_alloc(struct usb_function_instance *fi)
+{
+       struct f_eem    *eem;
+       struct f_eem_opts *opts;
 
        /* allocate and initialize one new instance */
-       eem = kzalloc(sizeof *eem, GFP_KERNEL);
+       eem = kzalloc(sizeof(*eem), GFP_KERNEL);
        if (!eem)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
-       eem->port.ioport = dev;
+       opts = container_of(fi, struct f_eem_opts, func_inst);
+       mutex_lock(&opts->lock);
+       opts->refcnt++;
+
+       eem->port.ioport = netdev_priv(opts->net);
+       mutex_unlock(&opts->lock);
        eem->port.cdc_filter = DEFAULT_FILTER;
 
        eem->port.func.name = "cdc_eem";
-       eem->port.func.strings = eem_strings;
        /* descriptors are per-instance copies */
        eem->port.func.bind = eem_bind;
        eem->port.func.unbind = eem_unbind;
        eem->port.func.set_alt = eem_set_alt;
        eem->port.func.setup = eem_setup;
        eem->port.func.disable = eem_disable;
+       eem->port.func.free_func = eem_free;
        eem->port.wrap = eem_wrap;
        eem->port.unwrap = eem_unwrap;
        eem->port.header_len = EEM_HLEN;
 
-       status = usb_add_function(c, &eem->port.func);
-       if (status)
-               kfree(eem);
-       return status;
+       return &eem->port.func;
 }
 
+DECLARE_USB_FUNCTION_INIT(eem, eem_alloc_inst, eem_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Brownell");
index 97666e8b1b9554ce54b1b6309a537eb456e406d5..56f1fd1cba255259ae7618549708740d473a4319 100644 (file)
@@ -413,6 +413,7 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
 /* Caller must hold fsg->lock */
 static void wakeup_thread(struct fsg_common *common)
 {
+       smp_wmb();      /* ensure the write of bh->state is complete */
        /* Tell the main thread that something has happened */
        common->thread_wakeup_needed = 1;
        if (common->thread_task)
@@ -632,6 +633,7 @@ static int sleep_thread(struct fsg_common *common)
        }
        __set_current_state(TASK_RUNNING);
        common->thread_wakeup_needed = 0;
+       smp_rmb();      /* ensure the latest bh->state is visible */
        return rc;
 }
 
@@ -2745,8 +2747,8 @@ buffhds_first_it:
                 "%-8s%-16s%04x", cfg->vendor_name ?: "Linux",
                 /* Assume product name dependent on the first LUN */
                 cfg->product_name ?: (common->luns->cdrom
-                                    ? "File-Stor Gadget"
-                                    : "File-CD Gadget"),
+                                    ? "File-CD Gadget"
+                                    : "File-Stor Gadget"),
                 i);
 
        /*
index ee19bc8d0040c1c4efa868d32e7c2e2336875ed4..952177f7eb9b40e8045bfa85730239ab22efd97c 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/etherdevice.h>
 #include <linux/crc32.h>
@@ -23,6 +24,8 @@
 #include <linux/usb/cdc.h>
 
 #include "u_ether.h"
+#include "u_ether_configfs.h"
+#include "u_ncm.h"
 
 /*
  * This function is a "CDC Network Control Model" (CDC NCM) Ethernet link.
@@ -125,7 +128,7 @@ static struct usb_cdc_ncm_ntb_parameters ntb_parameters = {
 #define NCM_STATUS_INTERVAL_MS         32
 #define NCM_STATUS_BYTECOUNT           16      /* 8 byte header + data */
 
-static struct usb_interface_assoc_descriptor ncm_iad_desc __initdata = {
+static struct usb_interface_assoc_descriptor ncm_iad_desc = {
        .bLength =              sizeof ncm_iad_desc,
        .bDescriptorType =      USB_DT_INTERFACE_ASSOCIATION,
 
@@ -139,7 +142,7 @@ static struct usb_interface_assoc_descriptor ncm_iad_desc __initdata = {
 
 /* interface descriptor: */
 
-static struct usb_interface_descriptor ncm_control_intf __initdata = {
+static struct usb_interface_descriptor ncm_control_intf = {
        .bLength =              sizeof ncm_control_intf,
        .bDescriptorType =      USB_DT_INTERFACE,
 
@@ -151,7 +154,7 @@ static struct usb_interface_descriptor ncm_control_intf __initdata = {
        /* .iInterface = DYNAMIC */
 };
 
-static struct usb_cdc_header_desc ncm_header_desc __initdata = {
+static struct usb_cdc_header_desc ncm_header_desc = {
        .bLength =              sizeof ncm_header_desc,
        .bDescriptorType =      USB_DT_CS_INTERFACE,
        .bDescriptorSubType =   USB_CDC_HEADER_TYPE,
@@ -159,7 +162,7 @@ static struct usb_cdc_header_desc ncm_header_desc __initdata = {
        .bcdCDC =               cpu_to_le16(0x0110),
 };
 
-static struct usb_cdc_union_desc ncm_union_desc __initdata = {
+static struct usb_cdc_union_desc ncm_union_desc = {
        .bLength =              sizeof(ncm_union_desc),
        .bDescriptorType =      USB_DT_CS_INTERFACE,
        .bDescriptorSubType =   USB_CDC_UNION_TYPE,
@@ -167,7 +170,7 @@ static struct usb_cdc_union_desc ncm_union_desc __initdata = {
        /* .bSlaveInterface0 =  DYNAMIC */
 };
 
-static struct usb_cdc_ether_desc ecm_desc __initdata = {
+static struct usb_cdc_ether_desc ecm_desc = {
        .bLength =              sizeof ecm_desc,
        .bDescriptorType =      USB_DT_CS_INTERFACE,
        .bDescriptorSubType =   USB_CDC_ETHERNET_TYPE,
@@ -182,7 +185,7 @@ static struct usb_cdc_ether_desc ecm_desc __initdata = {
 
 #define NCAPS  (USB_CDC_NCM_NCAP_ETH_FILTER | USB_CDC_NCM_NCAP_CRC_MODE)
 
-static struct usb_cdc_ncm_desc ncm_desc __initdata = {
+static struct usb_cdc_ncm_desc ncm_desc = {
        .bLength =              sizeof ncm_desc,
        .bDescriptorType =      USB_DT_CS_INTERFACE,
        .bDescriptorSubType =   USB_CDC_NCM_TYPE,
@@ -194,7 +197,7 @@ static struct usb_cdc_ncm_desc ncm_desc __initdata = {
 
 /* the default data interface has no endpoints ... */
 
-static struct usb_interface_descriptor ncm_data_nop_intf __initdata = {
+static struct usb_interface_descriptor ncm_data_nop_intf = {
        .bLength =              sizeof ncm_data_nop_intf,
        .bDescriptorType =      USB_DT_INTERFACE,
 
@@ -209,7 +212,7 @@ static struct usb_interface_descriptor ncm_data_nop_intf __initdata = {
 
 /* ... but the "real" data interface has two bulk endpoints */
 
-static struct usb_interface_descriptor ncm_data_intf __initdata = {
+static struct usb_interface_descriptor ncm_data_intf = {
        .bLength =              sizeof ncm_data_intf,
        .bDescriptorType =      USB_DT_INTERFACE,
 
@@ -224,7 +227,7 @@ static struct usb_interface_descriptor ncm_data_intf __initdata = {
 
 /* full speed support: */
 
-static struct usb_endpoint_descriptor fs_ncm_notify_desc __initdata = {
+static struct usb_endpoint_descriptor fs_ncm_notify_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -234,7 +237,7 @@ static struct usb_endpoint_descriptor fs_ncm_notify_desc __initdata = {
        .bInterval =            NCM_STATUS_INTERVAL_MS,
 };
 
-static struct usb_endpoint_descriptor fs_ncm_in_desc __initdata = {
+static struct usb_endpoint_descriptor fs_ncm_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -242,7 +245,7 @@ static struct usb_endpoint_descriptor fs_ncm_in_desc __initdata = {
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
 };
 
-static struct usb_endpoint_descriptor fs_ncm_out_desc __initdata = {
+static struct usb_endpoint_descriptor fs_ncm_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -250,7 +253,7 @@ static struct usb_endpoint_descriptor fs_ncm_out_desc __initdata = {
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
 };
 
-static struct usb_descriptor_header *ncm_fs_function[] __initdata = {
+static struct usb_descriptor_header *ncm_fs_function[] = {
        (struct usb_descriptor_header *) &ncm_iad_desc,
        /* CDC NCM control descriptors */
        (struct usb_descriptor_header *) &ncm_control_intf,
@@ -269,7 +272,7 @@ static struct usb_descriptor_header *ncm_fs_function[] __initdata = {
 
 /* high speed support: */
 
-static struct usb_endpoint_descriptor hs_ncm_notify_desc __initdata = {
+static struct usb_endpoint_descriptor hs_ncm_notify_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -278,7 +281,7 @@ static struct usb_endpoint_descriptor hs_ncm_notify_desc __initdata = {
        .wMaxPacketSize =       cpu_to_le16(NCM_STATUS_BYTECOUNT),
        .bInterval =            USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
 };
-static struct usb_endpoint_descriptor hs_ncm_in_desc __initdata = {
+static struct usb_endpoint_descriptor hs_ncm_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -287,7 +290,7 @@ static struct usb_endpoint_descriptor hs_ncm_in_desc __initdata = {
        .wMaxPacketSize =       cpu_to_le16(512),
 };
 
-static struct usb_endpoint_descriptor hs_ncm_out_desc __initdata = {
+static struct usb_endpoint_descriptor hs_ncm_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
 
@@ -296,7 +299,7 @@ static struct usb_endpoint_descriptor hs_ncm_out_desc __initdata = {
        .wMaxPacketSize =       cpu_to_le16(512),
 };
 
-static struct usb_descriptor_header *ncm_hs_function[] __initdata = {
+static struct usb_descriptor_header *ncm_hs_function[] = {
        (struct usb_descriptor_header *) &ncm_iad_desc,
        /* CDC NCM control descriptors */
        (struct usb_descriptor_header *) &ncm_control_intf,
@@ -1152,13 +1155,44 @@ static void ncm_close(struct gether *geth)
 
 /* ethernet function driver setup/binding */
 
-static int __init
-ncm_bind(struct usb_configuration *c, struct usb_function *f)
+static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
 {
        struct usb_composite_dev *cdev = c->cdev;
        struct f_ncm            *ncm = func_to_ncm(f);
+       struct usb_string       *us;
        int                     status;
        struct usb_ep           *ep;
+       struct f_ncm_opts       *ncm_opts;
+
+       if (!can_support_ecm(cdev->gadget))
+               return -EINVAL;
+
+       ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+       /*
+        * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+        * configurations are bound in sequence with list_for_each_entry,
+        * in each configuration its functions are bound in sequence
+        * with list_for_each_entry, so we assume no race condition
+        * with regard to ncm_opts->bound access
+        */
+       if (!ncm_opts->bound) {
+               mutex_lock(&ncm_opts->lock);
+               gether_set_gadget(ncm_opts->net, cdev->gadget);
+               status = gether_register_netdev(ncm_opts->net);
+               mutex_unlock(&ncm_opts->lock);
+               if (status)
+                       return status;
+               ncm_opts->bound = true;
+       }
+       us = usb_gstrings_attach(cdev, ncm_strings,
+                                ARRAY_SIZE(ncm_string_defs));
+       if (IS_ERR(us))
+               return PTR_ERR(us);
+       ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
+       ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
+       ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
+       ecm_desc.iMACAddress = us[STRING_MAC_IDX].id;
+       ncm_iad_desc.iFunction = us[STRING_IAD_IDX].id;
 
        /* allocate instance-specific interface IDs */
        status = usb_interface_id(c, f);
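
Above, ncm_bind() switches from the old "allocate string IDs once, globally" idiom to usb_gstrings_attach(), which returns a per-composite-device copy of the string table so one function module can serve several gadgets at once. A minimal sketch of the table shapes the call expects; the identifiers are illustrative, not the f_ncm ones.

	#include <linux/kernel.h>
	#include <linux/usb/composite.h>

	enum { EX_CTRL_IDX, EX_DATA_IDX };

	static struct usb_string ex_string_defs[] = {
		[EX_CTRL_IDX].s = "Example Control",
		[EX_DATA_IDX].s = "Example Data",
		{  }				/* end of list */
	};

	static struct usb_gadget_strings ex_string_table = {
		.language	= 0x0409,	/* en-US */
		.strings	= ex_string_defs,
	};

	static struct usb_gadget_strings *ex_strings[] = {
		&ex_string_table,
		NULL,
	};

	/* called from a ->bind() callback */
	static int example_attach(struct usb_composite_dev *cdev,
				  struct usb_interface_descriptor *intf)
	{
		struct usb_string *us;

		us = usb_gstrings_attach(cdev, ex_strings, ARRAY_SIZE(ex_string_defs));
		if (IS_ERR(us))
			return PTR_ERR(us);
		intf->iInterface = us[EX_CTRL_IDX].id;	/* IDs are per-cdev now */
		return 0;
	}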
@@ -1259,74 +1293,128 @@ fail:
        return status;
 }
 
-static void
-ncm_unbind(struct usb_configuration *c, struct usb_function *f)
+static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
 {
-       struct f_ncm            *ncm = func_to_ncm(f);
+       return container_of(to_config_group(item), struct f_ncm_opts,
+                           func_inst.group);
+}
 
-       DBG(c->cdev, "ncm unbind\n");
+/* f_ncm_item_ops */
+USB_ETHERNET_CONFIGFS_ITEM(ncm);
 
-       ncm_string_defs[0].id = 0;
-       usb_free_all_descriptors(f);
+/* f_ncm_opts_dev_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ncm);
 
-       kfree(ncm->notify_req->buf);
-       usb_ep_free_request(ncm->notify, ncm->notify_req);
+/* f_ncm_opts_host_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ncm);
 
+/* f_ncm_opts_qmult */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm);
+
+/* f_ncm_opts_ifname */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm);
+
+static struct configfs_attribute *ncm_attrs[] = {
+       &f_ncm_opts_dev_addr.attr,
+       &f_ncm_opts_host_addr.attr,
+       &f_ncm_opts_qmult.attr,
+       &f_ncm_opts_ifname.attr,
+       NULL,
+};
+
+static struct config_item_type ncm_func_type = {
+       .ct_item_ops    = &ncm_item_ops,
+       .ct_attrs       = ncm_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+static void ncm_free_inst(struct usb_function_instance *f)
+{
+       struct f_ncm_opts *opts;
+
+       opts = container_of(f, struct f_ncm_opts, func_inst);
+       if (opts->bound)
+               gether_cleanup(netdev_priv(opts->net));
+       else
+               free_netdev(opts->net);
+       kfree(opts);
+}
+
+static struct usb_function_instance *ncm_alloc_inst(void)
+{
+       struct f_ncm_opts *opts;
+
+       opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+       if (!opts)
+               return ERR_PTR(-ENOMEM);
+       mutex_init(&opts->lock);
+       opts->func_inst.free_func_inst = ncm_free_inst;
+       opts->net = gether_setup_default();
+       if (IS_ERR(opts->net))
+               return ERR_PTR(PTR_ERR(opts->net));
+
+       config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
+
+       return &opts->func_inst;
+}
+
+static void ncm_free(struct usb_function *f)
+{
+       struct f_ncm *ncm;
+       struct f_ncm_opts *opts;
+
+       ncm = func_to_ncm(f);
+       opts = container_of(f->fi, struct f_ncm_opts, func_inst);
        kfree(ncm);
+       mutex_lock(&opts->lock);
+       opts->refcnt--;
+       mutex_unlock(&opts->lock);
 }
 
-/**
- * ncm_bind_config - add CDC Network link to a configuration
- * @c: the configuration to support the network link
- * @ethaddr: a buffer in which the ethernet address of the host side
- *     side of the link was recorded
- * Context: single threaded during gadget setup
- *
- * Returns zero on success, else negative errno.
- *
- * Caller must have called @gether_setup().  Caller is also responsible
- * for calling @gether_cleanup() before module unload.
- */
-int __init ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
-               struct eth_dev *dev)
+static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
 {
-       struct f_ncm    *ncm;
-       int             status;
+       struct f_ncm *ncm = func_to_ncm(f);
 
-       if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
-               return -EINVAL;
+       DBG(c->cdev, "ncm unbind\n");
 
-       if (ncm_string_defs[0].id == 0) {
-               status = usb_string_ids_tab(c->cdev, ncm_string_defs);
-               if (status < 0)
-                       return status;
-               ncm_control_intf.iInterface =
-                       ncm_string_defs[STRING_CTRL_IDX].id;
+       usb_free_all_descriptors(f);
 
-               status = ncm_string_defs[STRING_DATA_IDX].id;
-               ncm_data_nop_intf.iInterface = status;
-               ncm_data_intf.iInterface = status;
+       kfree(ncm->notify_req->buf);
+       usb_ep_free_request(ncm->notify, ncm->notify_req);
+}
 
-               ecm_desc.iMACAddress = ncm_string_defs[STRING_MAC_IDX].id;
-               ncm_iad_desc.iFunction = ncm_string_defs[STRING_IAD_IDX].id;
-       }
+struct usb_function *ncm_alloc(struct usb_function_instance *fi)
+{
+       struct f_ncm            *ncm;
+       struct f_ncm_opts       *opts;
+       int status;
 
        /* allocate and initialize one new instance */
-       ncm = kzalloc(sizeof *ncm, GFP_KERNEL);
+       ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
        if (!ncm)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
+
+       opts = container_of(fi, struct f_ncm_opts, func_inst);
+       mutex_lock(&opts->lock);
+       opts->refcnt++;
 
        /* export host's Ethernet address in CDC format */
-       snprintf(ncm->ethaddr, sizeof ncm->ethaddr, "%pm", ethaddr);
+       status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
+                                     sizeof(ncm->ethaddr));
+       if (status < 12) { /* strlen("01234567890a") */
+               kfree(ncm);
+               mutex_unlock(&opts->lock);
+               return ERR_PTR(-EINVAL);
+       }
        ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
 
        spin_lock_init(&ncm->lock);
        ncm_reset_values(ncm);
-       ncm->port.ioport = dev;
+       ncm->port.ioport = netdev_priv(opts->net);
+       mutex_unlock(&opts->lock);
        ncm->port.is_fixed = true;
 
        ncm->port.func.name = "cdc_network";
-       ncm->port.func.strings = ncm_strings;
        /* descriptors are per-instance copies */
        ncm->port.func.bind = ncm_bind;
        ncm->port.func.unbind = ncm_unbind;
@@ -1334,12 +1422,14 @@ int __init ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        ncm->port.func.get_alt = ncm_get_alt;
        ncm->port.func.setup = ncm_setup;
        ncm->port.func.disable = ncm_disable;
+       ncm->port.func.free_func = ncm_free;
 
        ncm->port.wrap = ncm_wrap_ntb;
        ncm->port.unwrap = ncm_unwrap_ntb;
 
-       status = usb_add_function(c, &ncm->port.func);
-       if (status)
-               kfree(ncm);
-       return status;
+       return &ncm->port.func;
 }
+
+DECLARE_USB_FUNCTION_INIT(ncm, ncm_alloc_inst, ncm_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yauheni Kaliuta");
index 8aa2be5329bcff2a3619c4aa0044c944afa5566f..ad39f1dacba353519d72b5b61bb5e3e90b46f44f 100644 (file)
@@ -309,23 +309,20 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f)
 {
        struct usb_composite_dev *cdev = c->cdev;
        struct f_obex           *obex = func_to_obex(f);
+       struct usb_string       *us;
        int                     status;
        struct usb_ep           *ep;
 
        if (!can_support_obex(c))
                return -EINVAL;
 
-       if (obex_string_defs[OBEX_CTRL_IDX].id == 0) {
-               status = usb_string_ids_tab(c->cdev, obex_string_defs);
-               if (status < 0)
-                       return status;
-               obex_control_intf.iInterface =
-                       obex_string_defs[OBEX_CTRL_IDX].id;
-
-               status = obex_string_defs[OBEX_DATA_IDX].id;
-               obex_data_nop_intf.iInterface = status;
-               obex_data_intf.iInterface = status;
-       }
+       us = usb_gstrings_attach(cdev, obex_strings,
+                                ARRAY_SIZE(obex_string_defs));
+       if (IS_ERR(us))
+               return PTR_ERR(us);
+       obex_control_intf.iInterface = us[OBEX_CTRL_IDX].id;
+       obex_data_nop_intf.iInterface = us[OBEX_DATA_IDX].id;
+       obex_data_intf.iInterface = us[OBEX_DATA_IDX].id;
 
        /* allocate instance-specific interface IDs, and patch descriptors */
 
@@ -406,57 +403,6 @@ fail:
        return status;
 }
 
-#ifdef USBF_OBEX_INCLUDED
-
-static void
-obex_old_unbind(struct usb_configuration *c, struct usb_function *f)
-{
-       obex_string_defs[OBEX_CTRL_IDX].id = 0;
-       usb_free_all_descriptors(f);
-       kfree(func_to_obex(f));
-}
-
-/**
- * obex_bind_config - add a CDC OBEX function to a configuration
- * @c: the configuration to support the CDC OBEX instance
- * @port_num: /dev/ttyGS* port this interface will use
- * Context: single threaded during gadget setup
- *
- * Returns zero on success, else negative errno.
- */
-int __init obex_bind_config(struct usb_configuration *c, u8 port_num)
-{
-       struct f_obex   *obex;
-       int             status;
-
-       /* allocate and initialize one new instance */
-       obex = kzalloc(sizeof *obex, GFP_KERNEL);
-       if (!obex)
-               return -ENOMEM;
-
-       obex->port_num = port_num;
-
-       obex->port.connect = obex_connect;
-       obex->port.disconnect = obex_disconnect;
-
-       obex->port.func.name = "obex";
-       obex->port.func.strings = obex_strings;
-       /* descriptors are per-instance copies */
-       obex->port.func.bind = obex_bind;
-       obex->port.func.unbind = obex_old_unbind;
-       obex->port.func.set_alt = obex_set_alt;
-       obex->port.func.get_alt = obex_get_alt;
-       obex->port.func.disable = obex_disable;
-
-       status = usb_add_function(c, &obex->port.func);
-       if (status)
-               kfree(obex);
-
-       return status;
-}
-
-#else
-
 static inline struct f_serial_opts *to_f_serial_opts(struct config_item *item)
 {
        return container_of(to_config_group(item), struct f_serial_opts,
@@ -550,7 +496,6 @@ static void obex_free(struct usb_function *f)
 
 static void obex_unbind(struct usb_configuration *c, struct usb_function *f)
 {
-       obex_string_defs[OBEX_CTRL_IDX].id = 0;
        usb_free_all_descriptors(f);
 }
 
@@ -572,7 +517,6 @@ struct usb_function *obex_alloc(struct usb_function_instance *fi)
        obex->port.disconnect = obex_disconnect;
 
        obex->port.func.name = "obex";
-       obex->port.func.strings = obex_strings;
        /* descriptors are per-instance copies */
        obex->port.func.bind = obex_bind;
        obex->port.func.unbind = obex_unbind;
@@ -585,8 +529,5 @@ struct usb_function *obex_alloc(struct usb_function_instance *fi)
 }
 
 DECLARE_USB_FUNCTION_INIT(obex, obex_alloc_inst, obex_alloc);
-
-#endif
-
 MODULE_AUTHOR("Felipe Balbi");
 MODULE_LICENSE("GPL");
index b21ab558b6c01ada7fbca98bf63ec2949090a17d..7944fb0efe3b6a67471e7379a03a5d8ebc9b23fb 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/device.h>
 
 #include <linux/netdevice.h>
@@ -25,6 +26,7 @@
 #include <linux/usb/composite.h>
 
 #include "u_phonet.h"
+#include "u_ether.h"
 
 #define PN_MEDIA_USB   0x1B
 #define MAXPACKET      512
@@ -478,8 +480,7 @@ static void pn_disconnect(struct usb_function *f)
 
 /*-------------------------------------------------------------------------*/
 
-static __init
-int pn_bind(struct usb_configuration *c, struct usb_function *f)
+static int pn_bind(struct usb_configuration *c, struct usb_function *f)
 {
        struct usb_composite_dev *cdev = c->cdev;
        struct usb_gadget *gadget = cdev->gadget;
@@ -487,6 +488,27 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f)
        struct usb_ep *ep;
        int status, i;
 
+#ifndef USBF_PHONET_INCLUDED
+       struct f_phonet_opts *phonet_opts;
+
+       phonet_opts = container_of(f->fi, struct f_phonet_opts, func_inst);
+
+       /*
+        * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+        * configurations are bound in sequence with list_for_each_entry,
+        * in each configuration its functions are bound in sequence
+        * with list_for_each_entry, so we assume no race condition
+        * with regard to phonet_opts->bound access
+        */
+       if (!phonet_opts->bound) {
+               gphonet_set_gadget(phonet_opts->net, gadget);
+               status = gphonet_register_netdev(phonet_opts->net);
+               if (status)
+                       return status;
+               phonet_opts->bound = true;
+       }
+#endif
+
        /* Reserve interface IDs */
        status = usb_interface_id(c, f);
        if (status < 0)
@@ -560,8 +582,98 @@ err:
        return status;
 }
 
-static void
-pn_unbind(struct usb_configuration *c, struct usb_function *f)
+static inline struct f_phonet_opts *to_f_phonet_opts(struct config_item *item)
+{
+       return container_of(to_config_group(item), struct f_phonet_opts,
+                       func_inst.group);
+}
+
+CONFIGFS_ATTR_STRUCT(f_phonet_opts);
+static ssize_t f_phonet_attr_show(struct config_item *item,
+                               struct configfs_attribute *attr,
+                               char *page)
+{
+       struct f_phonet_opts *opts = to_f_phonet_opts(item);
+       struct f_phonet_opts_attribute *f_phonet_opts_attr =
+               container_of(attr, struct f_phonet_opts_attribute, attr);
+       ssize_t ret = 0;
+
+       if (f_phonet_opts_attr->show)
+               ret = f_phonet_opts_attr->show(opts, page);
+       return ret;
+}
+
+static void phonet_attr_release(struct config_item *item)
+{
+       struct f_phonet_opts *opts = to_f_phonet_opts(item);
+
+       usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations phonet_item_ops = {
+       .release                = phonet_attr_release,
+       .show_attribute         = f_phonet_attr_show,
+};
+
+static ssize_t f_phonet_ifname_show(struct f_phonet_opts *opts, char *page)
+{
+       return gether_get_ifname(opts->net, page, PAGE_SIZE);
+}
+
+static struct f_phonet_opts_attribute f_phonet_ifname =
+       __CONFIGFS_ATTR_RO(ifname, f_phonet_ifname_show);
+
+static struct configfs_attribute *phonet_attrs[] = {
+       &f_phonet_ifname.attr,
+       NULL,
+};
+
+static struct config_item_type phonet_func_type = {
+       .ct_item_ops    = &phonet_item_ops,
+       .ct_attrs       = phonet_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+static void phonet_free_inst(struct usb_function_instance *f)
+{
+       struct f_phonet_opts *opts;
+
+       opts = container_of(f, struct f_phonet_opts, func_inst);
+       if (opts->bound)
+               gphonet_cleanup(opts->net);
+       else
+               free_netdev(opts->net);
+       kfree(opts);
+}
+
+static struct usb_function_instance *phonet_alloc_inst(void)
+{
+       struct f_phonet_opts *opts;
+
+       opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+       if (!opts)
+               return ERR_PTR(-ENOMEM);
+
+       opts->func_inst.free_func_inst = phonet_free_inst;
+       opts->net = gphonet_setup_default();
+       if (IS_ERR(opts->net))
+               return ERR_PTR(PTR_ERR(opts->net));
+
+       config_group_init_type_name(&opts->func_inst.group, "",
+                       &phonet_func_type);
+
+       return &opts->func_inst;
+}
+
+static void phonet_free(struct usb_function *f)
+{
+       struct f_phonet *phonet;
+
+       phonet = func_to_pn(f);
+       kfree(phonet);
+}
+
+static void pn_unbind(struct usb_configuration *c, struct usb_function *f)
 {
        struct f_phonet *fp = func_to_pn(f);
        int i;
@@ -574,61 +686,72 @@ pn_unbind(struct usb_configuration *c, struct usb_function *f)
                        usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
 
        usb_free_all_descriptors(f);
-       kfree(fp);
 }
 
-/*-------------------------------------------------------------------------*/
-
-static struct net_device *dev;
-
-int __init phonet_bind_config(struct usb_configuration *c)
+struct usb_function *phonet_alloc(struct usb_function_instance *fi)
 {
        struct f_phonet *fp;
-       int err, size;
+       struct f_phonet_opts *opts;
+       int size;
 
        size = sizeof(*fp) + (phonet_rxq_size * sizeof(struct usb_request *));
        fp = kzalloc(size, GFP_KERNEL);
        if (!fp)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
+
+       opts = container_of(fi, struct f_phonet_opts, func_inst);
 
-       fp->dev = dev;
+       fp->dev = opts->net;
        fp->function.name = "phonet";
        fp->function.bind = pn_bind;
        fp->function.unbind = pn_unbind;
        fp->function.set_alt = pn_set_alt;
        fp->function.get_alt = pn_get_alt;
        fp->function.disable = pn_disconnect;
+       fp->function.free_func = phonet_free;
        spin_lock_init(&fp->rx.lock);
 
-       err = usb_add_function(c, &fp->function);
-       if (err)
-               kfree(fp);
-       return err;
+       return &fp->function;
 }
 
-int __init gphonet_setup(struct usb_gadget *gadget)
+struct net_device *gphonet_setup_default(void)
 {
+       struct net_device *dev;
        struct phonet_port *port;
-       int err;
 
        /* Create net device */
-       BUG_ON(dev);
        dev = alloc_netdev(sizeof(*port), "upnlink%d", pn_net_setup);
        if (!dev)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        port = netdev_priv(dev);
        spin_lock_init(&port->lock);
        netif_carrier_off(dev);
-       SET_NETDEV_DEV(dev, &gadget->dev);
 
-       err = register_netdev(dev);
-       if (err)
-               free_netdev(dev);
-       return err;
+       return dev;
+}
+
+void gphonet_set_gadget(struct net_device *net, struct usb_gadget *g)
+{
+       SET_NETDEV_DEV(net, &g->dev);
+}
+
+int gphonet_register_netdev(struct net_device *net)
+{
+       int status;
+
+       status = register_netdev(net);
+       if (status)
+               free_netdev(net);
+
+       return status;
 }
 
-void gphonet_cleanup(void)
+void gphonet_cleanup(struct net_device *dev)
 {
        unregister_netdev(dev);
 }
+
+DECLARE_USB_FUNCTION_INIT(phonet, phonet_alloc_inst, phonet_alloc);
+MODULE_AUTHOR("Rémi Denis-Courmont");
+MODULE_LICENSE("GPL");
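
gphonet_setup() is split above into gphonet_setup_default(), gphonet_set_gadget(), gphonet_register_netdev() and a gphonet_cleanup() that now takes the device, so allocation can happen at instance-creation time and registration at first bind. A minimal sketch of the net_device lifecycle those helpers wrap, using the 3.11-era three-argument alloc_netdev(); pn_example_setup() is a stand-in for the real pn_net_setup(), and the destructor assignment is an assumption of this sketch.

	#include <linux/err.h>
	#include <linux/netdevice.h>

	static void pn_example_setup(struct net_device *dev)
	{
		/* fill in dev->netdev_ops, MTU, etc. */
		dev->destructor = free_netdev;	/* so unregister_netdev() also frees */
	}

	static struct net_device *example_create(void)
	{
		struct net_device *dev = alloc_netdev(0, "upnlink%d", pn_example_setup);

		if (!dev)
			return ERR_PTR(-ENOMEM);
		netif_carrier_off(dev);		/* no link until the gadget connects */
		return dev;
	}

	static int example_register(struct net_device *dev)
	{
		int err = register_netdev(dev);

		if (err)
			free_netdev(dev);	/* never registered: plain free */
		return err;
	}

	static void example_destroy(struct net_device *dev)
	{
		unregister_netdev(dev);		/* destructor set above frees the device */
	}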
index 36e8c44d8e5e2fb04222b86b4e9fbc4d3a493083..191df35ae69d04e31c6fc70ac7d76f2b4ff83be3 100644 (file)
 
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/etherdevice.h>
 
 #include <linux/atomic.h>
 
 #include "u_ether.h"
+#include "u_ether_configfs.h"
+#include "u_rndis.h"
 #include "rndis.h"
 
-
 /*
  * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
  * been promoted instead of the standard CDC Ethernet.  The published RNDIS
@@ -655,6 +657,13 @@ static void rndis_close(struct gether *geth)
 
 /*-------------------------------------------------------------------------*/
 
+/* Some controllers can't support RNDIS ... */
+static inline bool can_support_rndis(struct usb_configuration *c)
+{
+       /* everything else is *presumably* fine */
+       return true;
+}
+
 /* ethernet function driver setup/binding */
 
 static int
@@ -662,9 +671,41 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
 {
        struct usb_composite_dev *cdev = c->cdev;
        struct f_rndis          *rndis = func_to_rndis(f);
+       struct usb_string       *us;
        int                     status;
        struct usb_ep           *ep;
 
+#ifndef USB_FRNDIS_INCLUDED
+       struct f_rndis_opts *rndis_opts;
+
+       if (!can_support_rndis(c))
+               return -EINVAL;
+
+       rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst);
+
+       /*
+        * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+        * configurations are bound in sequence with list_for_each_entry,
+        * in each configuration its functions are bound in sequence
+        * with list_for_each_entry, so we assume no race condition
+        * with regard to rndis_opts->bound access
+        */
+       if (!rndis_opts->bound) {
+               gether_set_gadget(rndis_opts->net, cdev->gadget);
+               status = gether_register_netdev(rndis_opts->net);
+               if (status)
+                       return status;
+               rndis_opts->bound = true;
+       }
+#endif
+       us = usb_gstrings_attach(cdev, rndis_strings,
+                                ARRAY_SIZE(rndis_string_defs));
+       if (IS_ERR(us))
+               return PTR_ERR(us);
+       rndis_control_intf.iInterface = us[0].id;
+       rndis_data_intf.iInterface = us[1].id;
+       rndis_iad_descriptor.iFunction = us[2].id;
+
        /* allocate instance-specific interface IDs */
        status = usb_interface_id(c, f);
        if (status < 0)
@@ -741,10 +782,12 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
        rndis->port.open = rndis_open;
        rndis->port.close = rndis_close;
 
+#ifdef USB_FRNDIS_INCLUDED
        status = rndis_register(rndis_response_available, rndis);
        if (status < 0)
                goto fail;
        rndis->config = status;
+#endif
 
        rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
        rndis_set_host_mac(rndis->config, rndis->ethaddr);
@@ -787,15 +830,15 @@ fail:
        return status;
 }
 
+#ifdef USB_FRNDIS_INCLUDED
+
 static void
-rndis_unbind(struct usb_configuration *c, struct usb_function *f)
+rndis_old_unbind(struct usb_configuration *c, struct usb_function *f)
 {
        struct f_rndis          *rndis = func_to_rndis(f);
 
        rndis_deregister(rndis->config);
-       rndis_exit();
 
-       rndis_string_defs[0].id = 0;
        usb_free_all_descriptors(f);
 
        kfree(rndis->notify_req->buf);
@@ -804,13 +847,6 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f)
        kfree(rndis);
 }
 
-/* Some controllers can't support RNDIS ... */
-static inline bool can_support_rndis(struct usb_configuration *c)
-{
-       /* everything else is *presumably* fine */
-       return true;
-}
-
 int
 rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
                u32 vendorID, const char *manufacturer, struct eth_dev *dev)
@@ -818,24 +854,6 @@ rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        struct f_rndis  *rndis;
        int             status;
 
-       if (!can_support_rndis(c) || !ethaddr)
-               return -EINVAL;
-
-       if (rndis_string_defs[0].id == 0) {
-               /* ... and setup RNDIS itself */
-               status = rndis_init();
-               if (status < 0)
-                       return status;
-
-               status = usb_string_ids_tab(c->cdev, rndis_string_defs);
-               if (status)
-                       return status;
-
-               rndis_control_intf.iInterface = rndis_string_defs[0].id;
-               rndis_data_intf.iInterface = rndis_string_defs[1].id;
-               rndis_iad_descriptor.iFunction = rndis_string_defs[2].id;
-       }
-
        /* allocate and initialize one new instance */
        status = -ENOMEM;
        rndis = kzalloc(sizeof *rndis, GFP_KERNEL);
@@ -856,19 +874,178 @@ rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        rndis->port.unwrap = rndis_rm_hdr;
 
        rndis->port.func.name = "rndis";
-       rndis->port.func.strings = rndis_strings;
        /* descriptors are per-instance copies */
        rndis->port.func.bind = rndis_bind;
-       rndis->port.func.unbind = rndis_unbind;
+       rndis->port.func.unbind = rndis_old_unbind;
        rndis->port.func.set_alt = rndis_set_alt;
        rndis->port.func.setup = rndis_setup;
        rndis->port.func.disable = rndis_disable;
 
        status = usb_add_function(c, &rndis->port.func);
-       if (status) {
+       if (status)
                kfree(rndis);
 fail:
-               rndis_exit();
-       }
        return status;
 }
+
+#else
+
+void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net)
+{
+       struct f_rndis_opts *opts;
+
+       opts = container_of(f, struct f_rndis_opts, func_inst);
+       if (opts->bound)
+               gether_cleanup(netdev_priv(opts->net));
+       else
+               free_netdev(opts->net);
+       opts->borrowed_net = opts->bound = true;
+       opts->net = net;
+}
+EXPORT_SYMBOL(rndis_borrow_net);
+
+static inline struct f_rndis_opts *to_f_rndis_opts(struct config_item *item)
+{
+       return container_of(to_config_group(item), struct f_rndis_opts,
+                           func_inst.group);
+}
+
+/* f_rndis_item_ops */
+USB_ETHERNET_CONFIGFS_ITEM(rndis);
+
+/* f_rndis_opts_dev_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(rndis);
+
+/* f_rndis_opts_host_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(rndis);
+
+/* f_rndis_opts_qmult */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(rndis);
+
+/* f_rndis_opts_ifname */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(rndis);
+
+static struct configfs_attribute *rndis_attrs[] = {
+       &f_rndis_opts_dev_addr.attr,
+       &f_rndis_opts_host_addr.attr,
+       &f_rndis_opts_qmult.attr,
+       &f_rndis_opts_ifname.attr,
+       NULL,
+};
+
+static struct config_item_type rndis_func_type = {
+       .ct_item_ops    = &rndis_item_ops,
+       .ct_attrs       = rndis_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+static void rndis_free_inst(struct usb_function_instance *f)
+{
+       struct f_rndis_opts *opts;
+
+       opts = container_of(f, struct f_rndis_opts, func_inst);
+       if (!opts->borrowed_net) {
+               if (opts->bound)
+                       gether_cleanup(netdev_priv(opts->net));
+               else
+                       free_netdev(opts->net);
+       }
+       kfree(opts);
+}
+
+static struct usb_function_instance *rndis_alloc_inst(void)
+{
+       struct f_rndis_opts *opts;
+
+       opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+       if (!opts)
+               return ERR_PTR(-ENOMEM);
+       mutex_init(&opts->lock);
+       opts->func_inst.free_func_inst = rndis_free_inst;
+       opts->net = gether_setup_default();
+       if (IS_ERR(opts->net))
+               return ERR_CAST(opts->net);
+
+       config_group_init_type_name(&opts->func_inst.group, "",
+                                   &rndis_func_type);
+
+       return &opts->func_inst;
+}
+
+static void rndis_free(struct usb_function *f)
+{
+       struct f_rndis *rndis;
+       struct f_rndis_opts *opts;
+
+       rndis = func_to_rndis(f);
+       rndis_deregister(rndis->config);
+       opts = container_of(f->fi, struct f_rndis_opts, func_inst);
+       kfree(rndis);
+       mutex_lock(&opts->lock);
+       opts->refcnt--;
+       mutex_unlock(&opts->lock);
+}
+
+static void rndis_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct f_rndis          *rndis = func_to_rndis(f);
+
+       usb_free_all_descriptors(f);
+
+       kfree(rndis->notify_req->buf);
+       usb_ep_free_request(rndis->notify, rndis->notify_req);
+}
+
+static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
+{
+       struct f_rndis  *rndis;
+       struct f_rndis_opts *opts;
+       int status;
+
+       /* allocate and initialize one new instance */
+       rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
+       if (!rndis)
+               return ERR_PTR(-ENOMEM);
+
+       opts = container_of(fi, struct f_rndis_opts, func_inst);
+       mutex_lock(&opts->lock);
+       opts->refcnt++;
+
+       gether_get_host_addr_u8(opts->net, rndis->ethaddr);
+       rndis->vendorID = opts->vendor_id;
+       rndis->manufacturer = opts->manufacturer;
+
+       rndis->port.ioport = netdev_priv(opts->net);
+       mutex_unlock(&opts->lock);
+       /* RNDIS activates when the host changes this filter */
+       rndis->port.cdc_filter = 0;
+
+       /* RNDIS has special (and complex) framing */
+       rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
+       rndis->port.wrap = rndis_add_header;
+       rndis->port.unwrap = rndis_rm_hdr;
+
+       rndis->port.func.name = "rndis";
+       /* descriptors are per-instance copies */
+       rndis->port.func.bind = rndis_bind;
+       rndis->port.func.unbind = rndis_unbind;
+       rndis->port.func.set_alt = rndis_set_alt;
+       rndis->port.func.setup = rndis_setup;
+       rndis->port.func.disable = rndis_disable;
+       rndis->port.func.free_func = rndis_free;
+
+       status = rndis_register(rndis_response_available, rndis);
+       if (status < 0) {
+               kfree(rndis);
+               return ERR_PTR(status);
+       }
+       rndis->config = status;
+
+       return &rndis->port.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(rndis, rndis_alloc_inst, rndis_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Brownell");
+
+#endif
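
rndis_borrow_net() above is exported so that a legacy-style multi-function gadget can keep a single u_ether link: it releases the net_device the rndis instance allocated for itself and points the instance at one owned by another function. A hedged sketch of how a gadget driver might combine it with an "ncm" instance; u_ncm.h and struct f_ncm_opts come from this same series, but the overall flow here is illustrative, not taken from an in-tree gadget.

	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/usb/composite.h>

	#include "u_ncm.h"		/* struct f_ncm_opts */
	#include "u_rndis.h"		/* rndis_borrow_net() */

	static struct usb_function_instance *fi_ncm, *fi_rndis;

	static int example_share_one_link(void)
	{
		struct f_ncm_opts *ncm_opts;

		fi_ncm = usb_get_function_instance("ncm");
		if (IS_ERR(fi_ncm))
			return PTR_ERR(fi_ncm);

		fi_rndis = usb_get_function_instance("rndis");
		if (IS_ERR(fi_rndis)) {
			usb_put_function_instance(fi_ncm);
			return PTR_ERR(fi_rndis);
		}

		ncm_opts = container_of(fi_ncm, struct f_ncm_opts, func_inst);
		/* rndis gives up its own net_device and reuses the ncm one */
		rndis_borrow_net(fi_rndis, ncm_opts->net);
		return 0;
	}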
index 7be04b3424941f73c96d7a6de51d53101efe0f92..5601e1d96c4fabd7f8a44bc80703080495fba56a 100644 (file)
 
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/etherdevice.h>
 
 #include "u_ether.h"
-
+#include "u_ether_configfs.h"
+#include "u_gether.h"
 
 /*
  * This function packages a simple "CDC Subset" Ethernet port with no real
@@ -295,9 +297,40 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
 {
        struct usb_composite_dev *cdev = c->cdev;
        struct f_gether         *geth = func_to_geth(f);
+       struct usb_string       *us;
        int                     status;
        struct usb_ep           *ep;
 
+#ifndef USB_FSUBSET_INCLUDED
+       struct f_gether_opts    *gether_opts;
+
+       gether_opts = container_of(f->fi, struct f_gether_opts, func_inst);
+
+       /*
+        * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+        * configurations are bound in sequence with list_for_each_entry,
+        * in each configuration its functions are bound in sequence
+        * with list_for_each_entry, so we assume no race condition
+        * with regard to gether_opts->bound access
+        */
+       if (!gether_opts->bound) {
+               mutex_lock(&gether_opts->lock);
+               gether_set_gadget(gether_opts->net, cdev->gadget);
+               status = gether_register_netdev(gether_opts->net);
+               mutex_unlock(&gether_opts->lock);
+               if (status)
+                       return status;
+               gether_opts->bound = true;
+       }
+#endif
+       us = usb_gstrings_attach(cdev, geth_strings,
+                                ARRAY_SIZE(geth_string_defs));
+       if (IS_ERR(us))
+               return PTR_ERR(us);
+
+       subset_data_intf.iInterface = us[0].id;
+       ether_desc.iMACAddress = us[1].id;
+
        /* allocate instance-specific interface IDs */
        status = usb_interface_id(c, f);
        if (status < 0)
@@ -360,8 +393,10 @@ fail:
        return status;
 }
 
+#ifdef USB_FSUBSET_INCLUDED
+
 static void
-geth_unbind(struct usb_configuration *c, struct usb_function *f)
+geth_old_unbind(struct usb_configuration *c, struct usb_function *f)
 {
        geth_string_defs[0].id = 0;
        usb_free_all_descriptors(f);
@@ -387,18 +422,6 @@ int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        struct f_gether *geth;
        int             status;
 
-       if (!ethaddr)
-               return -EINVAL;
-
-       /* maybe allocate device-global string IDs */
-       if (geth_string_defs[0].id == 0) {
-               status = usb_string_ids_tab(c->cdev, geth_string_defs);
-               if (status < 0)
-                       return status;
-               subset_data_intf.iInterface = geth_string_defs[0].id;
-               ether_desc.iMACAddress = geth_string_defs[1].id;
-       }
-
        /* allocate and initialize one new instance */
        geth = kzalloc(sizeof *geth, GFP_KERNEL);
        if (!geth)
@@ -412,9 +435,8 @@ int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        geth->port.cdc_filter = DEFAULT_FILTER;
 
        geth->port.func.name = "cdc_subset";
-       geth->port.func.strings = geth_strings;
        geth->port.func.bind = geth_bind;
-       geth->port.func.unbind = geth_unbind;
+       geth->port.func.unbind = geth_old_unbind;
        geth->port.func.set_alt = geth_set_alt;
        geth->port.func.disable = geth_disable;
 
@@ -423,3 +445,130 @@ int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
                kfree(geth);
        return status;
 }
+
+#else
+
+static inline struct f_gether_opts *to_f_gether_opts(struct config_item *item)
+{
+       return container_of(to_config_group(item), struct f_gether_opts,
+                           func_inst.group);
+}
+
+/* f_gether_item_ops */
+USB_ETHERNET_CONFIGFS_ITEM(gether);
+
+/* f_gether_opts_dev_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(gether);
+
+/* f_gether_opts_host_addr */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(gether);
+
+/* f_gether_opts_qmult */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(gether);
+
+/* f_gether_opts_ifname */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(gether);
+
+static struct configfs_attribute *gether_attrs[] = {
+       &f_gether_opts_dev_addr.attr,
+       &f_gether_opts_host_addr.attr,
+       &f_gether_opts_qmult.attr,
+       &f_gether_opts_ifname.attr,
+       NULL,
+};
+
+static struct config_item_type gether_func_type = {
+       .ct_item_ops    = &gether_item_ops,
+       .ct_attrs       = gether_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+static void geth_free_inst(struct usb_function_instance *f)
+{
+       struct f_gether_opts *opts;
+
+       opts = container_of(f, struct f_gether_opts, func_inst);
+       if (opts->bound)
+               gether_cleanup(netdev_priv(opts->net));
+       else
+               free_netdev(opts->net);
+       kfree(opts);
+}
+
+static struct usb_function_instance *geth_alloc_inst(void)
+{
+       struct f_gether_opts *opts;
+
+       opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+       if (!opts)
+               return ERR_PTR(-ENOMEM);
+       mutex_init(&opts->lock);
+       opts->func_inst.free_func_inst = geth_free_inst;
+       opts->net = gether_setup_default();
+       if (IS_ERR(opts->net))
+               return ERR_CAST(opts->net);
+
+       config_group_init_type_name(&opts->func_inst.group, "",
+                                   &gether_func_type);
+
+       return &opts->func_inst;
+}
+
+static void geth_free(struct usb_function *f)
+{
+       struct f_gether *eth;
+
+       eth = func_to_geth(f);
+       kfree(eth);
+}
+
+static void geth_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       geth_string_defs[0].id = 0;
+       usb_free_all_descriptors(f);
+}
+
+static struct usb_function *geth_alloc(struct usb_function_instance *fi)
+{
+       struct f_gether *geth;
+       struct f_gether_opts *opts;
+       int status;
+
+       /* allocate and initialize one new instance */
+       geth = kzalloc(sizeof(*geth), GFP_KERNEL);
+       if (!geth)
+               return ERR_PTR(-ENOMEM);
+
+       opts = container_of(fi, struct f_gether_opts, func_inst);
+
+       mutex_lock(&opts->lock);
+       opts->refcnt++;
+       /* export host's Ethernet address in CDC format */
+       status = gether_get_host_addr_cdc(opts->net, geth->ethaddr,
+                                         sizeof(geth->ethaddr));
+       if (status < 12) {
+               kfree(geth);
+               mutex_unlock(&opts->lock);
+               return ERR_PTR(-EINVAL);
+       }
+       geth_string_defs[1].s = geth->ethaddr;
+
+       geth->port.ioport = netdev_priv(opts->net);
+       mutex_unlock(&opts->lock);
+       geth->port.cdc_filter = DEFAULT_FILTER;
+
+       geth->port.func.name = "cdc_subset";
+       geth->port.func.bind = geth_bind;
+       geth->port.func.unbind = geth_unbind;
+       geth->port.func.set_alt = geth_set_alt;
+       geth->port.func.disable = geth_disable;
+       geth->port.func.free_func = geth_free;
+
+       return &geth->port.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(geth, geth_alloc_inst, geth_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Brownell");
+
+#endif
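
geth_alloc() above, like ncm_alloc() earlier, treats anything shorter than 12 characters from gether_get_host_addr_cdc() as an error, because the iMACAddress string is the host MAC rendered as 12 hex digits with no separators. A tiny illustrative helper showing that formatting; the name and buffer handling are this sketch's, not u_ether's.

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Format a 6-byte MAC the way the iMACAddress string expects it:
	 * 12 hex digits, no separators. Returns the number of characters
	 * that would be written, hence the "status < 12" checks above.
	 */
	static int example_format_cdc_addr(char *buf, size_t len, const u8 mac[6])
	{
		return snprintf(buf, len, "%02x%02x%02x%02x%02x%02x",
				mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	}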
index 03c1fb686644e02d34b5b0f4a9c8e847d6a6de27..2f23566e53d88bf7fd618efcc100b49447c0cf27 100644 (file)
@@ -90,6 +90,7 @@ struct uac2_req {
 };
 
 struct uac2_rtd_params {
+       struct snd_uac2_chip *uac2; /* parent chip */
        bool ep_enabled; /* if the ep is enabled */
        /* Size of the ring buffer */
        size_t dma_bytes;
@@ -168,18 +169,6 @@ struct snd_uac2_chip *pdev_to_uac2(struct platform_device *p)
        return container_of(p, struct snd_uac2_chip, pdev);
 }
 
-static inline
-struct snd_uac2_chip *prm_to_uac2(struct uac2_rtd_params *r)
-{
-       struct snd_uac2_chip *uac2 = container_of(r,
-                                       struct snd_uac2_chip, c_prm);
-
-       if (&uac2->c_prm != r)
-               uac2 = container_of(r, struct snd_uac2_chip, p_prm);
-
-       return uac2;
-}
-
 static inline
 uint num_channels(uint chanmask)
 {
@@ -204,7 +193,7 @@ agdev_iso_complete(struct usb_ep *ep, struct usb_request *req)
        struct uac2_req *ur = req->context;
        struct snd_pcm_substream *substream;
        struct uac2_rtd_params *prm = ur->pp;
-       struct snd_uac2_chip *uac2 = prm_to_uac2(prm);
+       struct snd_uac2_chip *uac2 = prm->uac2;
 
        /* i/f shutting down */
        if (!prm->ep_enabled)
@@ -894,7 +883,7 @@ struct cntrl_range_lay3 {
 static inline void
 free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep)
 {
-       struct snd_uac2_chip *uac2 = prm_to_uac2(prm);
+       struct snd_uac2_chip *uac2 = prm->uac2;
        int i;
 
        prm->ep_enabled = false;
@@ -970,6 +959,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
        }
        agdev->in_ep->driver_data = agdev;
 
+       uac2->p_prm.uac2 = uac2;
+       uac2->c_prm.uac2 = uac2;
+
        hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
        hs_epout_desc.wMaxPacketSize = fs_epout_desc.wMaxPacketSize;
        hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
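
The uac2 hunks replace prm_to_uac2(), which guessed the parent by trying container_of() against both possible members, with an explicit back-pointer filled in at bind time. A minimal generic sketch of the two approaches; the parent/child names are illustrative.

	#include <linux/kernel.h>	/* container_of() */

	struct parent;

	struct child {
		struct parent *parent;	/* explicit back-pointer, set by the parent */
		int data;
	};

	struct parent {
		struct child playback;
		struct child capture;
	};

	/* Old style: only works by testing which member the pointer really was. */
	static struct parent *child_to_parent_guess(struct child *c)
	{
		struct parent *p = container_of(c, struct parent, capture);

		if (&p->capture != c)		/* wrong guess: it was the other member */
			p = container_of(c, struct parent, playback);
		return p;
	}

	/* New style: unambiguous, at the cost of one pointer per child. */
	static struct parent *child_to_parent(struct child *c)
	{
		return c->parent;
	}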
index 38dcedddc52c0bb43b70e6c5faa8ac3857d5c466..5f91c7a599461be843f923d5513a882f135c2dbc 100644 (file)
@@ -156,8 +156,6 @@ static struct usb_endpoint_descriptor uvc_fs_streaming_ep __initdata = {
        /* The wMaxPacketSize and bInterval values will be initialized from
         * module parameters.
         */
-       .wMaxPacketSize         = 0,
-       .bInterval              = 0,
 };
 
 static struct usb_endpoint_descriptor uvc_hs_streaming_ep __initdata = {
@@ -169,8 +167,6 @@ static struct usb_endpoint_descriptor uvc_hs_streaming_ep __initdata = {
        /* The wMaxPacketSize and bInterval values will be initialized from
         * module parameters.
         */
-       .wMaxPacketSize         = 0,
-       .bInterval              = 0,
 };
 
 static struct usb_endpoint_descriptor uvc_ss_streaming_ep __initdata = {
@@ -183,17 +179,14 @@ static struct usb_endpoint_descriptor uvc_ss_streaming_ep __initdata = {
        /* The wMaxPacketSize and bInterval values will be initialized from
         * module parameters.
         */
-       .wMaxPacketSize         = 0,
-       .bInterval              = 0,
 };
 
 static struct usb_ss_ep_comp_descriptor uvc_ss_streaming_comp __initdata = {
        .bLength                = sizeof(uvc_ss_streaming_comp),
        .bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
-       /* The following 3 values can be tweaked if necessary. */
-       .bMaxBurst              = 0,
-       .bmAttributes           = 0,
-       .wBytesPerInterval      = cpu_to_le16(1024),
+       /* The bMaxBurst, bmAttributes and wBytesPerInterval values will be
+        * initialized from module parameters.
+        */
 };
 
 static const struct usb_descriptor_header * const uvc_fs_streaming[] = {
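
The uvc hunks above drop the explicit zero initializers and the hard-coded wBytesPerInterval, since these endpoint fields are, per the updated comments, filled from module parameters at bind time. A hedged sketch of that pattern; the parameter and descriptor names are illustrative, not the uvc ones.

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/usb/ch9.h>

	static unsigned int streaming_maxpacket = 1024;
	module_param(streaming_maxpacket, uint, 0444);
	MODULE_PARM_DESC(streaming_maxpacket, "Maximum packet size of the streaming endpoint");

	static struct usb_endpoint_descriptor example_streaming_ep = {
		.bLength		= USB_DT_ENDPOINT_SIZE,
		.bDescriptorType	= USB_DT_ENDPOINT,
		.bmAttributes		= USB_ENDPOINT_XFER_ISOC,
		/* wMaxPacketSize and bInterval filled in at bind time, below */
	};

	static void example_fill_ep(void)
	{
		example_streaming_ep.wMaxPacketSize = cpu_to_le16(streaming_maxpacket);
		example_streaming_ep.bInterval = 4;	/* normally a module parameter as well */
	}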
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c
new file mode 100644 (file)
index 0000000..cce5535
--- /dev/null
@@ -0,0 +1,1219 @@
+/*
+ * FOTG210 UDC Driver supports Bulk transfer so far
+ *
+ * Copyright (C) 2013 Faraday Technology Corporation
+ *
+ * Author : Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "fotg210.h"
+
+#define        DRIVER_DESC     "FOTG210 USB Device Controller Driver"
+#define        DRIVER_VERSION  "30-April-2013"
+
+static const char udc_name[] = "fotg210_udc";
+static const char * const fotg210_ep_name[] = {
+       "ep0", "ep1", "ep2", "ep3", "ep4"};
+
+static void fotg210_disable_fifo_int(struct fotg210_ep *ep)
+{
+       u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
+
+       if (ep->dir_in)
+               value |= DMISGR1_MF_IN_INT(ep->epnum - 1);
+       else
+               value |= DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
+       iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
+}
+
+static void fotg210_enable_fifo_int(struct fotg210_ep *ep)
+{
+       u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
+
+       if (ep->dir_in)
+               value &= ~DMISGR1_MF_IN_INT(ep->epnum - 1);
+       else
+               value &= ~DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
+       iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
+}
+
+static void fotg210_set_cxdone(struct fotg210_udc *fotg210)
+{
+       u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
+
+       value |= DCFESR_CX_DONE;
+       iowrite32(value, fotg210->reg + FOTG210_DCFESR);
+}
+
+static void fotg210_done(struct fotg210_ep *ep, struct fotg210_request *req,
+                       int status)
+{
+       list_del_init(&req->queue);
+
+       /* don't modify queue heads during completion callback */
+       if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
+               req->req.status = -ESHUTDOWN;
+       else
+               req->req.status = status;
+
+       spin_unlock(&ep->fotg210->lock);
+       req->req.complete(&ep->ep, &req->req);
+       spin_lock(&ep->fotg210->lock);
+
+       if (ep->epnum) {
+               if (list_empty(&ep->queue))
+                       fotg210_disable_fifo_int(ep);
+       } else {
+               fotg210_set_cxdone(ep->fotg210);
+       }
+}
+
+static void fotg210_fifo_ep_mapping(struct fotg210_ep *ep, u32 epnum,
+                               u32 dir_in)
+{
+       struct fotg210_udc *fotg210 = ep->fotg210;
+       u32 val;
+
+       /* Driver should map an ep to a fifo and then map the fifo
+        * to the ep. What a brain-damaged design!
+        */
+
+       /* map a fifo to an ep */
+       val = ioread32(fotg210->reg + FOTG210_EPMAP);
+       val &= ~EPMAP_FIFONOMSK(epnum, dir_in);
+       val |= EPMAP_FIFONO(epnum, dir_in);
+       iowrite32(val, fotg210->reg + FOTG210_EPMAP);
+
+       /* map the ep to the fifo */
+       val = ioread32(fotg210->reg + FOTG210_FIFOMAP);
+       val &= ~FIFOMAP_EPNOMSK(epnum);
+       val |= FIFOMAP_EPNO(epnum);
+       iowrite32(val, fotg210->reg + FOTG210_FIFOMAP);
+
+       /* enable fifo */
+       val = ioread32(fotg210->reg + FOTG210_FIFOCF);
+       val |= FIFOCF_FIFO_EN(epnum - 1);
+       iowrite32(val, fotg210->reg + FOTG210_FIFOCF);
+}
+
+static void fotg210_set_fifo_dir(struct fotg210_ep *ep, u32 epnum, u32 dir_in)
+{
+       struct fotg210_udc *fotg210 = ep->fotg210;
+       u32 val;
+
+       val = ioread32(fotg210->reg + FOTG210_FIFOMAP);
+       val |= (dir_in ? FIFOMAP_DIRIN(epnum - 1) : FIFOMAP_DIROUT(epnum - 1));
+       iowrite32(val, fotg210->reg + FOTG210_FIFOMAP);
+}
+
+static void fotg210_set_tfrtype(struct fotg210_ep *ep, u32 epnum, u32 type)
+{
+       struct fotg210_udc *fotg210 = ep->fotg210;
+       u32 val;
+
+       val = ioread32(fotg210->reg + FOTG210_FIFOCF);
+       val |= FIFOCF_TYPE(type, epnum - 1);
+       iowrite32(val, fotg210->reg + FOTG210_FIFOCF);
+}
+
+static void fotg210_set_mps(struct fotg210_ep *ep, u32 epnum, u32 mps,
+                               u32 dir_in)
+{
+       struct fotg210_udc *fotg210 = ep->fotg210;
+       u32 val;
+       u32 offset = dir_in ? FOTG210_INEPMPSR(epnum) :
+                               FOTG210_OUTEPMPSR(epnum);
+
+       val = ioread32(fotg210->reg + offset);
+       val |= INOUTEPMPSR_MPS(mps);
+       iowrite32(val, fotg210->reg + offset);
+}
+
+static int fotg210_config_ep(struct fotg210_ep *ep,
+                    const struct usb_endpoint_descriptor *desc)
+{
+       struct fotg210_udc *fotg210 = ep->fotg210;
+
+       fotg210_set_fifo_dir(ep, ep->epnum, ep->dir_in);
+       fotg210_set_tfrtype(ep, ep->epnum, ep->type);
+       fotg210_set_mps(ep, ep->epnum, ep->ep.maxpacket, ep->dir_in);
+       fotg210_fifo_ep_mapping(ep, ep->epnum, ep->dir_in);
+
+       fotg210->ep[ep->epnum] = ep;
+
+       return 0;
+}
+
+static int fotg210_ep_enable(struct usb_ep *_ep,
+                         const struct usb_endpoint_descriptor *desc)
+{
+       struct fotg210_ep *ep;
+
+       ep = container_of(_ep, struct fotg210_ep, ep);
+
+       ep->desc = desc;
+       ep->epnum = usb_endpoint_num(desc);
+       ep->type = usb_endpoint_type(desc);
+       ep->dir_in = usb_endpoint_dir_in(desc);
+       ep->ep.maxpacket = usb_endpoint_maxp(desc);
+
+       return fotg210_config_ep(ep, desc);
+}
+
+static void fotg210_reset_tseq(struct fotg210_udc *fotg210, u8 epnum)
+{
+       struct fotg210_ep *ep = fotg210->ep[epnum];
+       u32 value;
+       void __iomem *reg;
+
+       reg = (ep->dir_in) ?
+               fotg210->reg + FOTG210_INEPMPSR(epnum) :
+               fotg210->reg + FOTG210_OUTEPMPSR(epnum);
+
+       /* Note: Driver needs to set and clear INOUTEPMPSR_RESET_TSEQ
+        *       bit. Controller wouldn't clear this bit. WTF!!!
+        */
+
+       value = ioread32(reg);
+       value |= INOUTEPMPSR_RESET_TSEQ;
+       iowrite32(value, reg);
+
+       value = ioread32(reg);
+       value &= ~INOUTEPMPSR_RESET_TSEQ;
+       iowrite32(value, reg);
+}
+
+static int fotg210_ep_release(struct fotg210_ep *ep)
+{
+       if (!ep->epnum)
+               return 0;
+       ep->epnum = 0;
+       ep->stall = 0;
+       ep->wedged = 0;
+
+       fotg210_reset_tseq(ep->fotg210, ep->epnum);
+
+       return 0;
+}
+
+static int fotg210_ep_disable(struct usb_ep *_ep)
+{
+       struct fotg210_ep *ep;
+       struct fotg210_request *req;
+       unsigned long flags;
+
+       BUG_ON(!_ep);
+
+       ep = container_of(_ep, struct fotg210_ep, ep);
+
+       while (!list_empty(&ep->queue)) {
+               req = list_entry(ep->queue.next,
+                       struct fotg210_request, queue);
+               spin_lock_irqsave(&ep->fotg210->lock, flags);
+               fotg210_done(ep, req, -ECONNRESET);
+               spin_unlock_irqrestore(&ep->fotg210->lock, flags);
+       }
+
+       return fotg210_ep_release(ep);
+}
+
+static struct usb_request *fotg210_ep_alloc_request(struct usb_ep *_ep,
+                                               gfp_t gfp_flags)
+{
+       struct fotg210_request *req;
+
+       req = kzalloc(sizeof(struct fotg210_request), gfp_flags);
+       if (!req)
+               return NULL;
+
+       INIT_LIST_HEAD(&req->queue);
+
+       return &req->req;
+}
+
+static void fotg210_ep_free_request(struct usb_ep *_ep,
+                                       struct usb_request *_req)
+{
+       struct fotg210_request *req;
+
+       req = container_of(_req, struct fotg210_request, req);
+       kfree(req);
+}
+
+static void fotg210_enable_dma(struct fotg210_ep *ep,
+                             dma_addr_t d, u32 len)
+{
+       u32 value;
+       struct fotg210_udc *fotg210 = ep->fotg210;
+
+       /* set transfer length and direction */
+       value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
+       value &= ~(DMACPSR1_DMA_LEN(0xFFFF) | DMACPSR1_DMA_TYPE(1));
+       value |= DMACPSR1_DMA_LEN(len) | DMACPSR1_DMA_TYPE(ep->dir_in);
+       iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);
+
+       /* set device DMA target FIFO number */
+       value = ioread32(fotg210->reg + FOTG210_DMATFNR);
+       if (ep->epnum)
+               value |= DMATFNR_ACC_FN(ep->epnum - 1);
+       else
+               value |= DMATFNR_ACC_CXF;
+       iowrite32(value, fotg210->reg + FOTG210_DMATFNR);
+
+       /* set DMA memory address */
+       iowrite32(d, fotg210->reg + FOTG210_DMACPSR2);
+
+       /* enable MDMA_EROR and MDMA_CMPLT interrupt */
+       value = ioread32(fotg210->reg + FOTG210_DMISGR2);
+       value &= ~(DMISGR2_MDMA_CMPLT | DMISGR2_MDMA_ERROR);
+       iowrite32(value, fotg210->reg + FOTG210_DMISGR2);
+
+       /* start DMA */
+       value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
+       value |= DMACPSR1_DMA_START;
+       iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);
+}
+
+static void fotg210_disable_dma(struct fotg210_ep *ep)
+{
+       iowrite32(DMATFNR_DISDMA, ep->fotg210->reg + FOTG210_DMATFNR);
+}
+
+static void fotg210_wait_dma_done(struct fotg210_ep *ep)
+{
+       u32 value;
+
+       do {
+               value = ioread32(ep->fotg210->reg + FOTG210_DISGR2);
+               if ((value & DISGR2_USBRST_INT) ||
+                   (value & DISGR2_DMA_ERROR))
+                       goto dma_reset;
+       } while (!(value & DISGR2_DMA_CMPLT));
+
+       value &= ~DISGR2_DMA_CMPLT;
+       iowrite32(value, ep->fotg210->reg + FOTG210_DISGR2);
+       return;
+
+dma_reset:
+       value = ioread32(ep->fotg210->reg + FOTG210_DMACPSR1);
+       value |= DMACPSR1_DMA_ABORT;
+       iowrite32(value, ep->fotg210->reg + FOTG210_DMACPSR1);
+
+       /* reset fifo */
+       if (ep->epnum) {
+               value = ioread32(ep->fotg210->reg +
+                               FOTG210_FIBCR(ep->epnum - 1));
+               value |= FIBCR_FFRST;
+               iowrite32(value, ep->fotg210->reg +
+                               FOTG210_FIBCR(ep->epnum - 1));
+       } else {
+               value = ioread32(ep->fotg210->reg + FOTG210_DCFESR);
+               value |= DCFESR_CX_CLR;
+               iowrite32(value, ep->fotg210->reg + FOTG210_DCFESR);
+       }
+}
+
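+/*
+ * Perform one synchronous DMA transfer for the request: map the buffer,
+ * program and start the DMA engine, poll for completion, then unmap and
+ * update the number of bytes actually transferred.
+ */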
+static void fotg210_start_dma(struct fotg210_ep *ep,
+                       struct fotg210_request *req)
+{
+       dma_addr_t d;
+       u8 *buffer;
+       u32 length;
+
+       if (ep->epnum) {
+               if (ep->dir_in) {
+                       buffer = req->req.buf;
+                       length = req->req.length;
+               } else {
+                       buffer = req->req.buf + req->req.actual;
+                       length = ioread32(ep->fotg210->reg +
+                                       FOTG210_FIBCR(ep->epnum - 1));
+                       length &= FIBCR_BCFX;
+               }
+       } else {
+               buffer = req->req.buf + req->req.actual;
+               if (req->req.length - req->req.actual > ep->ep.maxpacket)
+                       length = ep->ep.maxpacket;
+               else
+                       length = req->req.length;
+       }
+
+       d = dma_map_single(NULL, buffer, length,
+                       ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+       if (dma_mapping_error(NULL, d)) {
+               pr_err("dma_mapping_error\n");
+               return;
+       }
+
+       dma_sync_single_for_device(NULL, d, length,
+                                  ep->dir_in ? DMA_TO_DEVICE :
+                                       DMA_FROM_DEVICE);
+
+       fotg210_enable_dma(ep, d, length);
+
+       /* check if dma is done */
+       fotg210_wait_dma_done(ep);
+
+       fotg210_disable_dma(ep);
+
+       /* update actual transfer length */
+       req->req.actual += length;
+
+       dma_unmap_single(NULL, d, length,
+                       ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+}
+
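+/*
+ * Queue a request on ep0: IN data is pushed by DMA right away, while for
+ * OUT data the CX OUT interrupt is unmasked so the data stage is handled
+ * from the interrupt handler.
+ */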
+static void fotg210_ep0_queue(struct fotg210_ep *ep,
+                               struct fotg210_request *req)
+{
+       if (!req->req.length) {
+               fotg210_done(ep, req, 0);
+               return;
+       }
+       if (ep->dir_in) { /* if IN */
+               if (req->req.length) {
+                       fotg210_start_dma(ep, req);
+               } else {
+                       pr_err("%s : req->req.length = 0x%x\n",
+                              __func__, req->req.length);
+               }
+               if ((req->req.length == req->req.actual) ||
+                   (req->req.actual < ep->ep.maxpacket))
+                       fotg210_done(ep, req, 0);
+       } else { /* OUT */
+               if (!req->req.length) {
+                       fotg210_done(ep, req, 0);
+               } else {
+                       u32 value = ioread32(ep->fotg210->reg +
+                                               FOTG210_DMISGR0);
+
+                       value &= ~DMISGR0_MCX_OUT_INT;
+                       iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0);
+               }
+       }
+}
+
+static int fotg210_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+                               gfp_t gfp_flags)
+{
+       struct fotg210_ep *ep;
+       struct fotg210_request *req;
+       unsigned long flags;
+       int request = 0;
+
+       ep = container_of(_ep, struct fotg210_ep, ep);
+       req = container_of(_req, struct fotg210_request, req);
+
+       if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
+               return -ESHUTDOWN;
+
+       spin_lock_irqsave(&ep->fotg210->lock, flags);
+
+       if (list_empty(&ep->queue))
+               request = 1;
+
+       list_add_tail(&req->queue, &ep->queue);
+
+       req->req.actual = 0;
+       req->req.status = -EINPROGRESS;
+
+       if (!ep->epnum) /* ep0 */
+               fotg210_ep0_queue(ep, req);
+       else if (request && !ep->stall)
+               fotg210_enable_fifo_int(ep);
+
+       spin_unlock_irqrestore(&ep->fotg210->lock, flags);
+
+       return 0;
+}
+
+static int fotg210_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+       struct fotg210_ep *ep;
+       struct fotg210_request *req;
+       unsigned long flags;
+
+       ep = container_of(_ep, struct fotg210_ep, ep);
+       req = container_of(_req, struct fotg210_request, req);
+
+       spin_lock_irqsave(&ep->fotg210->lock, flags);
+       if (!list_empty(&ep->queue))
+               fotg210_done(ep, req, -ECONNRESET);
+       spin_unlock_irqrestore(&ep->fotg210->lock, flags);
+
+       return 0;
+}
+
+static void fotg210_set_epnstall(struct fotg210_ep *ep)
+{
+       struct fotg210_udc *fotg210 = ep->fotg210;
+       u32 value;
+       void __iomem *reg;
+
+       /* check if IN FIFO is empty before stall */
+       if (ep->dir_in) {
+               do {
+                       value = ioread32(fotg210->reg + FOTG210_DCFESR);
+               } while (!(value & DCFESR_FIFO_EMPTY(ep->epnum - 1)));
+       }
+
+       reg = (ep->dir_in) ?
+               fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
+               fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
+       value = ioread32(reg);
+       value |= INOUTEPMPSR_STL_EP;
+       iowrite32(value, reg);
+}
+
+static void fotg210_clear_epnstall(struct fotg210_ep *ep)
+{
+       struct fotg210_udc *fotg210 = ep->fotg210;
+       u32 value;
+       void __iomem *reg;
+
+       reg = (ep->dir_in) ?
+               fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
+               fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
+       value = ioread32(reg);
+       value &= ~INOUTEPMPSR_STL_EP;
+       iowrite32(value, reg);
+}
+
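+/*
+ * Stall or un-stall an endpoint.  A wedged endpoint ignores the host's
+ * CLEAR_FEATURE(ENDPOINT_HALT) and stays stalled until the gadget driver
+ * clears it; clearing a plain halt also resets the data toggle sequence
+ * and re-enables the FIFO interrupt if requests are still queued.
+ */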
+static int fotg210_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
+{
+       struct fotg210_ep *ep;
+       struct fotg210_udc *fotg210;
+       unsigned long flags;
+       int ret = 0;
+
+       ep = container_of(_ep, struct fotg210_ep, ep);
+
+       fotg210 = ep->fotg210;
+
+       spin_lock_irqsave(&ep->fotg210->lock, flags);
+
+       if (value) {
+               fotg210_set_epnstall(ep);
+               ep->stall = 1;
+               if (wedge)
+                       ep->wedged = 1;
+       } else {
+               fotg210_reset_tseq(fotg210, ep->epnum);
+               fotg210_clear_epnstall(ep);
+               ep->stall = 0;
+               ep->wedged = 0;
+               if (!list_empty(&ep->queue))
+                       fotg210_enable_fifo_int(ep);
+       }
+
+       spin_unlock_irqrestore(&ep->fotg210->lock, flags);
+       return ret;
+}
+
+static int fotg210_ep_set_halt(struct usb_ep *_ep, int value)
+{
+       return fotg210_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int fotg210_ep_set_wedge(struct usb_ep *_ep)
+{
+       return fotg210_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static void fotg210_ep_fifo_flush(struct usb_ep *_ep)
+{
+}
+
+static struct usb_ep_ops fotg210_ep_ops = {
+       .enable         = fotg210_ep_enable,
+       .disable        = fotg210_ep_disable,
+
+       .alloc_request  = fotg210_ep_alloc_request,
+       .free_request   = fotg210_ep_free_request,
+
+       .queue          = fotg210_ep_queue,
+       .dequeue        = fotg210_ep_dequeue,
+
+       .set_halt       = fotg210_ep_set_halt,
+       .fifo_flush     = fotg210_ep_fifo_flush,
+       .set_wedge      = fotg210_ep_set_wedge,
+};
+
+static void fotg210_clear_tx0byte(struct fotg210_udc *fotg210)
+{
+       u32 value = ioread32(fotg210->reg + FOTG210_TX0BYTE);
+
+       value &= ~(TX0BYTE_EP1 | TX0BYTE_EP2 | TX0BYTE_EP3
+                  | TX0BYTE_EP4);
+       iowrite32(value, fotg210->reg + FOTG210_TX0BYTE);
+}
+
+static void fotg210_clear_rx0byte(struct fotg210_udc *fotg210)
+{
+       u32 value = ioread32(fotg210->reg + FOTG210_RX0BYTE);
+
+       value &= ~(RX0BYTE_EP1 | RX0BYTE_EP2 | RX0BYTE_EP3
+                  | RX0BYTE_EP4);
+       iowrite32(value, fotg210->reg + FOTG210_RX0BYTE);
+}
+
+/* read 8-byte setup packet only */
+static void fotg210_rdsetupp(struct fotg210_udc *fotg210,
+                  u8 *buffer)
+{
+       int i = 0;
+       u8 *tmp = buffer;
+       u32 data;
+       u32 length = 8;
+
+       iowrite32(DMATFNR_ACC_CXF, fotg210->reg + FOTG210_DMATFNR);
+
+       for (i = (length >> 2); i > 0; i--) {
+               data = ioread32(fotg210->reg + FOTG210_CXPORT);
+               *tmp = data & 0xFF;
+               *(tmp + 1) = (data >> 8) & 0xFF;
+               *(tmp + 2) = (data >> 16) & 0xFF;
+               *(tmp + 3) = (data >> 24) & 0xFF;
+               tmp = tmp + 4;
+       }
+
+       switch (length % 4) {
+       case 1:
+               data = ioread32(fotg210->reg + FOTG210_CXPORT);
+               *tmp = data & 0xFF;
+               break;
+       case 2:
+               data = ioread32(fotg210->reg + FOTG210_CXPORT);
+               *tmp = data & 0xFF;
+               *(tmp + 1) = (data >> 8) & 0xFF;
+               break;
+       case 3:
+               data = ioread32(fotg210->reg + FOTG210_CXPORT);
+               *tmp = data & 0xFF;
+               *(tmp + 1) = (data >> 8) & 0xFF;
+               *(tmp + 2) = (data >> 16) & 0xFF;
+               break;
+       default:
+               break;
+       }
+
+       iowrite32(DMATFNR_DISDMA, fotg210->reg + FOTG210_DMATFNR);
+}
+
+static void fotg210_set_configuration(struct fotg210_udc *fotg210)
+{
+       u32 value = ioread32(fotg210->reg + FOTG210_DAR);
+
+       value |= DAR_AFT_CONF;
+       iowrite32(value, fotg210->reg + FOTG210_DAR);
+}
+
+static void fotg210_set_dev_addr(struct fotg210_udc *fotg210, u32 addr)
+{
+       u32 value = ioread32(fotg210->reg + FOTG210_DAR);
+
+       value |= (addr & 0x7F);
+       iowrite32(value, fotg210->reg + FOTG210_DAR);
+}
+
+static void fotg210_set_cxstall(struct fotg210_udc *fotg210)
+{
+       u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
+
+       value |= DCFESR_CX_STL;
+       iowrite32(value, fotg210->reg + FOTG210_DCFESR);
+}
+
+static void fotg210_request_error(struct fotg210_udc *fotg210)
+{
+       fotg210_set_cxstall(fotg210);
+       pr_err("request error!!\n");
+}
+
+static void fotg210_set_address(struct fotg210_udc *fotg210,
+                               struct usb_ctrlrequest *ctrl)
+{
+       if (le16_to_cpu(ctrl->wValue) >= 0x0100) {
+               fotg210_request_error(fotg210);
+       } else {
+               fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue));
+               fotg210_set_cxdone(fotg210);
+       }
+}
+
+static void fotg210_set_feature(struct fotg210_udc *fotg210,
+                               struct usb_ctrlrequest *ctrl)
+{
+       switch (ctrl->bRequestType & USB_RECIP_MASK) {
+       case USB_RECIP_DEVICE:
+               fotg210_set_cxdone(fotg210);
+               break;
+       case USB_RECIP_INTERFACE:
+               fotg210_set_cxdone(fotg210);
+               break;
+       case USB_RECIP_ENDPOINT: {
+               u8 epnum;
+               epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+               if (epnum)
+                       fotg210_set_epnstall(fotg210->ep[epnum]);
+               else
+                       fotg210_set_cxstall(fotg210);
+               fotg210_set_cxdone(fotg210);
+               }
+               break;
+       default:
+               fotg210_request_error(fotg210);
+               break;
+       }
+}
+
+static void fotg210_clear_feature(struct fotg210_udc *fotg210,
+                               struct usb_ctrlrequest *ctrl)
+{
+       u8 epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+       struct fotg210_ep *ep = fotg210->ep[epnum];
+
+       switch (ctrl->bRequestType & USB_RECIP_MASK) {
+       case USB_RECIP_DEVICE:
+               fotg210_set_cxdone(fotg210);
+               break;
+       case USB_RECIP_INTERFACE:
+               fotg210_set_cxdone(fotg210);
+               break;
+       case USB_RECIP_ENDPOINT:
+               if (epnum) {
+                       if (ep->wedged) {
+                               fotg210_set_cxdone(fotg210);
+                               break;
+                       }
+                       if (ep->stall)
+                               fotg210_set_halt_and_wedge(&ep->ep, 0, 0);
+               }
+               fotg210_set_cxdone(fotg210);
+               break;
+       default:
+               fotg210_request_error(fotg210);
+               break;
+       }
+}
+
+static int fotg210_is_epnstall(struct fotg210_ep *ep)
+{
+       struct fotg210_udc *fotg210 = ep->fotg210;
+       u32 value;
+       void __iomem *reg;
+
+       reg = (ep->dir_in) ?
+               fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
+               fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
+       value = ioread32(reg);
+       return value & INOUTEPMPSR_STL_EP ? 1 : 0;
+}
+
+static void fotg210_get_status(struct fotg210_udc *fotg210,
+                               struct usb_ctrlrequest *ctrl)
+{
+       u8 epnum;
+
+       switch (ctrl->bRequestType & USB_RECIP_MASK) {
+       case USB_RECIP_DEVICE:
+               fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED);
+               break;
+       case USB_RECIP_INTERFACE:
+               fotg210->ep0_data = 0;
+               break;
+       case USB_RECIP_ENDPOINT:
+               epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+               if (epnum)
+                       fotg210->ep0_data = cpu_to_le16(
+                               fotg210_is_epnstall(fotg210->ep[epnum])
+                               << USB_ENDPOINT_HALT);
+               else
+                       fotg210_request_error(fotg210);
+               break;
+
+       default:
+               fotg210_request_error(fotg210);
+               return;         /* exit */
+       }
+
+       fotg210->ep0_req->buf = &fotg210->ep0_data;
+       fotg210->ep0_req->length = 2;
+
+       spin_unlock(&fotg210->lock);
+       fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_KERNEL);
+       spin_lock(&fotg210->lock);
+}
+
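+/*
+ * Read the SETUP packet from the CX FIFO and handle the standard requests
+ * the controller can answer itself.  A non-zero return value means the
+ * request has to be passed on to the gadget driver's ->setup() callback.
+ */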
+static int fotg210_setup_packet(struct fotg210_udc *fotg210,
+                               struct usb_ctrlrequest *ctrl)
+{
+       u8 *p = (u8 *)ctrl;
+       u8 ret = 0;
+
+       fotg210_rdsetupp(fotg210, p);
+
+       fotg210->ep[0]->dir_in = ctrl->bRequestType & USB_DIR_IN;
+
+       if (fotg210->gadget.speed == USB_SPEED_UNKNOWN) {
+               u32 value = ioread32(fotg210->reg + FOTG210_DMCR);
+               fotg210->gadget.speed = value & DMCR_HS_EN ?
+                               USB_SPEED_HIGH : USB_SPEED_FULL;
+       }
+
+       /* check request */
+       if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+               switch (ctrl->bRequest) {
+               case USB_REQ_GET_STATUS:
+                       fotg210_get_status(fotg210, ctrl);
+                       break;
+               case USB_REQ_CLEAR_FEATURE:
+                       fotg210_clear_feature(fotg210, ctrl);
+                       break;
+               case USB_REQ_SET_FEATURE:
+                       fotg210_set_feature(fotg210, ctrl);
+                       break;
+               case USB_REQ_SET_ADDRESS:
+                       fotg210_set_address(fotg210, ctrl);
+                       break;
+               case USB_REQ_SET_CONFIGURATION:
+                       fotg210_set_configuration(fotg210);
+                       ret = 1;
+                       break;
+               default:
+                       ret = 1;
+                       break;
+               }
+       } else {
+               ret = 1;
+       }
+
+       return ret;
+}
+
+static void fotg210_ep0out(struct fotg210_udc *fotg210)
+{
+       struct fotg210_ep *ep = fotg210->ep[0];
+
+       if (!list_empty(&ep->queue) && !ep->dir_in) {
+               struct fotg210_request *req;
+
+               req = list_first_entry(&ep->queue,
+                       struct fotg210_request, queue);
+
+               if (req->req.length)
+                       fotg210_start_dma(ep, req);
+
+               if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
+                       fotg210_done(ep, req, 0);
+       } else {
+               pr_err("%s : empty queue\n", __func__);
+       }
+}
+
+static void fotg210_ep0in(struct fotg210_udc *fotg210)
+{
+       struct fotg210_ep *ep = fotg210->ep[0];
+
+       if ((!list_empty(&ep->queue)) && (ep->dir_in)) {
+               struct fotg210_request *req;
+
+               req = list_entry(ep->queue.next,
+                               struct fotg210_request, queue);
+
+               if (req->req.length)
+                       fotg210_start_dma(ep, req);
+
+               if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
+                       fotg210_done(ep, req, 0);
+       } else {
+               fotg210_set_cxdone(fotg210);
+       }
+}
+
+static void fotg210_clear_comabt_int(struct fotg210_udc *fotg210)
+{
+       u32 value = ioread32(fotg210->reg + FOTG210_DISGR0);
+
+       value &= ~DISGR0_CX_COMABT_INT;
+       iowrite32(value, fotg210->reg + FOTG210_DISGR0);
+}
+
+static void fotg210_in_fifo_handler(struct fotg210_ep *ep)
+{
+       struct fotg210_request *req = list_entry(ep->queue.next,
+                                       struct fotg210_request, queue);
+
+       if (req->req.length)
+               fotg210_start_dma(ep, req);
+       fotg210_done(ep, req, 0);
+}
+
+static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
+{
+       struct fotg210_request *req = list_entry(ep->queue.next,
+                                                struct fotg210_request, queue);
+
+       fotg210_start_dma(ep, req);
+
+       /* finish out transfer */
+       if (req->req.length == req->req.actual ||
+           req->req.actual < ep->ep.maxpacket)
+               fotg210_done(ep, req, 0);
+}
+
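+/*
+ * Top-level interrupt handler.  Group 0 carries the ep0/control events,
+ * group 1 the IN/OUT/short-packet FIFO events of the data endpoints, and
+ * group 2 the bus-level events (reset, suspend, resume, DMA status).
+ */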
+static irqreturn_t fotg210_irq(int irq, void *_fotg210)
+{
+       struct fotg210_udc *fotg210 = _fotg210;
+       u32 int_grp = ioread32(fotg210->reg + FOTG210_DIGR);
+       u32 int_msk = ioread32(fotg210->reg + FOTG210_DMIGR);
+
+       int_grp &= ~int_msk;
+
+       spin_lock(&fotg210->lock);
+
+       if (int_grp & DIGR_INT_G2) {
+               void __iomem *reg = fotg210->reg + FOTG210_DISGR2;
+               u32 int_grp2 = ioread32(reg);
+               u32 int_msk2 = ioread32(fotg210->reg + FOTG210_DMISGR2);
+               u32 value;
+
+               int_grp2 &= ~int_msk2;
+
+               if (int_grp2 & DISGR2_USBRST_INT) {
+                       value = ioread32(reg);
+                       value &= ~DISGR2_USBRST_INT;
+                       iowrite32(value, reg);
+                       pr_info("fotg210 udc reset\n");
+               }
+               if (int_grp2 & DISGR2_SUSP_INT) {
+                       value = ioread32(reg);
+                       value &= ~DISGR2_SUSP_INT;
+                       iowrite32(value, reg);
+                       pr_info("fotg210 udc suspend\n");
+               }
+               if (int_grp2 & DISGR2_RESM_INT) {
+                       value = ioread32(reg);
+                       value &= ~DISGR2_RESM_INT;
+                       iowrite32(value, reg);
+                       pr_info("fotg210 udc resume\n");
+               }
+               if (int_grp2 & DISGR2_ISO_SEQ_ERR_INT) {
+                       value = ioread32(reg);
+                       value &= ~DISGR2_ISO_SEQ_ERR_INT;
+                       iowrite32(value, reg);
+                       pr_info("fotg210 iso sequence error\n");
+               }
+               if (int_grp2 & DISGR2_ISO_SEQ_ABORT_INT) {
+                       value = ioread32(reg);
+                       value &= ~DISGR2_ISO_SEQ_ABORT_INT;
+                       iowrite32(value, reg);
+                       pr_info("fotg210 iso sequence abort\n");
+               }
+               if (int_grp2 & DISGR2_TX0BYTE_INT) {
+                       fotg210_clear_tx0byte(fotg210);
+                       value = ioread32(reg);
+                       value &= ~DISGR2_TX0BYTE_INT;
+                       iowrite32(value, reg);
+                       pr_info("fotg210 transferred 0 byte\n");
+               }
+               if (int_grp2 & DISGR2_RX0BYTE_INT) {
+                       fotg210_clear_rx0byte(fotg210);
+                       value = ioread32(reg);
+                       value &= ~DISGR2_RX0BYTE_INT;
+                       iowrite32(value, reg);
+                       pr_info("fotg210 received 0 byte\n");
+               }
+               if (int_grp2 & DISGR2_DMA_ERROR) {
+                       value = ioread32(reg);
+                       value &= ~DISGR2_DMA_ERROR;
+                       iowrite32(value, reg);
+               }
+       }
+
+       if (int_grp & DIGR_INT_G0) {
+               void __iomem *reg = fotg210->reg + FOTG210_DISGR0;
+               u32 int_grp0 = ioread32(reg);
+               u32 int_msk0 = ioread32(fotg210->reg + FOTG210_DMISGR0);
+               struct usb_ctrlrequest ctrl;
+
+               int_grp0 &= ~int_msk0;
+
+               /* the highest priority in this source register */
+               if (int_grp0 & DISGR0_CX_COMABT_INT) {
+                       fotg210_clear_comabt_int(fotg210);
+                       pr_info("fotg210 CX command abort\n");
+               }
+
+               if (int_grp0 & DISGR0_CX_SETUP_INT) {
+                       if (fotg210_setup_packet(fotg210, &ctrl)) {
+                               spin_unlock(&fotg210->lock);
+                               if (fotg210->driver->setup(&fotg210->gadget,
+                                                          &ctrl) < 0)
+                                       fotg210_set_cxstall(fotg210);
+                               spin_lock(&fotg210->lock);
+                       }
+               }
+               if (int_grp0 & DISGR0_CX_COMEND_INT)
+                       pr_info("fotg210 cmd end\n");
+
+               if (int_grp0 & DISGR0_CX_IN_INT)
+                       fotg210_ep0in(fotg210);
+
+               if (int_grp0 & DISGR0_CX_OUT_INT)
+                       fotg210_ep0out(fotg210);
+
+               if (int_grp0 & DISGR0_CX_COMFAIL_INT) {
+                       fotg210_set_cxstall(fotg210);
+                       pr_info("fotg210 ep0 fail\n");
+               }
+       }
+
+       if (int_grp & DIGR_INT_G1) {
+               void __iomem *reg = fotg210->reg + FOTG210_DISGR1;
+               u32 int_grp1 = ioread32(reg);
+               u32 int_msk1 = ioread32(fotg210->reg + FOTG210_DMISGR1);
+               int fifo;
+
+               int_grp1 &= ~int_msk1;
+
+               for (fifo = 0; fifo < FOTG210_MAX_FIFO_NUM; fifo++) {
+                       if (int_grp1 & DISGR1_IN_INT(fifo))
+                               fotg210_in_fifo_handler(fotg210->ep[fifo + 1]);
+
+                       if ((int_grp1 & DISGR1_OUT_INT(fifo)) ||
+                           (int_grp1 & DISGR1_SPK_INT(fifo)))
+                               fotg210_out_fifo_handler(fotg210->ep[fifo + 1]);
+               }
+       }
+
+       spin_unlock(&fotg210->lock);
+
+       return IRQ_HANDLED;
+}
+
+static void fotg210_disable_unplug(struct fotg210_udc *fotg210)
+{
+       u32 reg = ioread32(fotg210->reg + FOTG210_PHYTMSR);
+
+       reg &= ~PHYTMSR_UNPLUG;
+       iowrite32(reg, fotg210->reg + FOTG210_PHYTMSR);
+}
+
+static int fotg210_udc_start(struct usb_gadget *g,
+               struct usb_gadget_driver *driver)
+{
+       struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
+       u32 value;
+
+       /* hook up the driver */
+       driver->driver.bus = NULL;
+       fotg210->driver = driver;
+
+       /* enable device global interrupt */
+       value = ioread32(fotg210->reg + FOTG210_DMCR);
+       value |= DMCR_GLINT_EN;
+       iowrite32(value, fotg210->reg + FOTG210_DMCR);
+
+       return 0;
+}
+
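+/*
+ * Quiesce the controller: mask the host/OTG interrupt sources, disable the
+ * device global interrupt, and mask the FIFO and command-end interrupts.
+ */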
+static void fotg210_init(struct fotg210_udc *fotg210)
+{
+       u32 value;
+
+       /* disable global interrupt and set int polarity to active high */
+       iowrite32(GMIR_MHC_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
+                 fotg210->reg + FOTG210_GMIR);
+
+       /* disable device global interrupt */
+       value = ioread32(fotg210->reg + FOTG210_DMCR);
+       value &= ~DMCR_GLINT_EN;
+       iowrite32(value, fotg210->reg + FOTG210_DMCR);
+
+       /* disable all FIFO interrupts */
+       iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
+
+       /* disable cmd end */
+       value = ioread32(fotg210->reg + FOTG210_DMISGR0);
+       value |= DMISGR0_MCX_COMEND;
+       iowrite32(value, fotg210->reg + FOTG210_DMISGR0);
+}
+
+static int fotg210_udc_stop(struct usb_gadget *g,
+               struct usb_gadget_driver *driver)
+{
+       struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&fotg210->lock, flags);
+
+       fotg210_init(fotg210);
+       fotg210->driver = NULL;
+
+       spin_unlock_irqrestore(&fotg210->lock, flags);
+
+       return 0;
+}
+
+static struct usb_gadget_ops fotg210_gadget_ops = {
+       .udc_start              = fotg210_udc_start,
+       .udc_stop               = fotg210_udc_stop,
+};
+
+static int __exit fotg210_udc_remove(struct platform_device *pdev)
+{
+       struct fotg210_udc *fotg210 = dev_get_drvdata(&pdev->dev);
+
+       usb_del_gadget_udc(&fotg210->gadget);
+       iounmap(fotg210->reg);
+       free_irq(platform_get_irq(pdev, 0), fotg210);
+
+       fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+       kfree(fotg210);
+
+       return 0;
+}
+
+static int __init fotg210_udc_probe(struct platform_device *pdev)
+{
+       struct resource *res, *ires;
+       struct fotg210_udc *fotg210 = NULL;
+       struct fotg210_ep *_ep[FOTG210_MAX_NUM_EP];
+       int ret = 0;
+       int i;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               pr_err("platform_get_resource error.\n");
+               return -ENODEV;
+       }
+
+       ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!ires) {
+               pr_err("platform_get_resource IORESOURCE_IRQ error.\n");
+               return -ENODEV;
+       }
+
+       ret = -ENOMEM;
+
+       /* initialize udc */
+       fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
+       if (fotg210 == NULL) {
+               pr_err("kzalloc error\n");
+               goto err_alloc;
+       }
+
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
+               _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
+               if (_ep[i] == NULL) {
+                       pr_err("_ep kzalloc error\n");
+                       goto err_alloc;
+               }
+               fotg210->ep[i] = _ep[i];
+       }
+
+       fotg210->reg = ioremap(res->start, resource_size(res));
+       if (fotg210->reg == NULL) {
+               pr_err("ioremap error.\n");
+               goto err_map;
+       }
+
+       spin_lock_init(&fotg210->lock);
+
+       dev_set_drvdata(&pdev->dev, fotg210);
+
+       fotg210->gadget.ops = &fotg210_gadget_ops;
+
+       fotg210->gadget.max_speed = USB_SPEED_HIGH;
+       fotg210->gadget.dev.parent = &pdev->dev;
+       fotg210->gadget.dev.dma_mask = pdev->dev.dma_mask;
+       fotg210->gadget.name = udc_name;
+
+       INIT_LIST_HEAD(&fotg210->gadget.ep_list);
+
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
+               struct fotg210_ep *ep = fotg210->ep[i];
+
+               if (i) {
+                       INIT_LIST_HEAD(&fotg210->ep[i]->ep.ep_list);
+                       list_add_tail(&fotg210->ep[i]->ep.ep_list,
+                                     &fotg210->gadget.ep_list);
+               }
+               ep->fotg210 = fotg210;
+               INIT_LIST_HEAD(&ep->queue);
+               ep->ep.name = fotg210_ep_name[i];
+               ep->ep.ops = &fotg210_ep_ops;
+       }
+       fotg210->ep[0]->ep.maxpacket = 0x40;
+       fotg210->gadget.ep0 = &fotg210->ep[0]->ep;
+       INIT_LIST_HEAD(&fotg210->gadget.ep0->ep_list);
+
+       fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
+                               GFP_KERNEL);
+       if (fotg210->ep0_req == NULL)
+               goto err_req;
+
+       fotg210_init(fotg210);
+
+       fotg210_disable_unplug(fotg210);
+
+       ret = request_irq(ires->start, fotg210_irq, IRQF_SHARED,
+                         udc_name, fotg210);
+       if (ret < 0) {
+               pr_err("request_irq error (%d)\n", ret);
+               goto err_irq;
+       }
+
+       ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget);
+       if (ret)
+               goto err_add_udc;
+
+       dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+
+       return 0;
+
+err_add_udc:
+       free_irq(ires->start, fotg210);
+
+err_irq:
+err_req:
+       fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+
+err_map:
+       if (fotg210->reg)
+               iounmap(fotg210->reg);
+
+err_alloc:
+       kfree(fotg210);
+
+       return ret;
+}
+
+static struct platform_driver fotg210_driver = {
+       .driver         = {
+               .name = (char *)udc_name,
+               .owner  = THIS_MODULE,
+       },
+       .probe          = fotg210_udc_probe,
+       .remove         = fotg210_udc_remove,
+};
+
+module_platform_driver(fotg210_driver);
+
+MODULE_AUTHOR("Yuan-Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/gadget/fotg210.h b/drivers/usb/gadget/fotg210.h
new file mode 100644 (file)
index 0000000..bbf991b
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * Faraday FOTG210 USB OTG controller
+ *
+ * Copyright (C) 2013 Faraday Technology Corporation
+ * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+
+#define FOTG210_MAX_NUM_EP     5 /* ep0...ep4 */
+#define FOTG210_MAX_FIFO_NUM   4 /* fifo0...fifo3 */
+
+/* Global Mask of HC/OTG/DEV interrupt Register(0xC4) */
+#define FOTG210_GMIR           0xC4
+#define GMIR_INT_POLARITY      0x8 /*Active High*/
+#define GMIR_MHC_INT           0x4
+#define GMIR_MOTG_INT          0x2
+#define GMIR_MDEV_INT          0x1
+
+/*  Device Main Control Register(0x100) */
+#define FOTG210_DMCR           0x100
+#define DMCR_HS_EN             (1 << 6)
+#define DMCR_CHIP_EN           (1 << 5)
+#define DMCR_SFRST             (1 << 4)
+#define DMCR_GOSUSP            (1 << 3)
+#define DMCR_GLINT_EN          (1 << 2)
+#define DMCR_HALF_SPEED                (1 << 1)
+#define DMCR_CAP_RMWAKUP       (1 << 0)
+
+/* Device Address Register(0x104) */
+#define FOTG210_DAR            0x104
+#define DAR_AFT_CONF           (1 << 7)
+
+/* Device Test Register(0x108) */
+#define FOTG210_DTR            0x108
+#define DTR_TST_CLRFF          (1 << 0)
+
+/* PHY Test Mode Selector register(0x114) */
+#define FOTG210_PHYTMSR                0x114
+#define PHYTMSR_TST_PKT                (1 << 4)
+#define PHYTMSR_TST_SE0NAK     (1 << 3)
+#define PHYTMSR_TST_KSTA       (1 << 2)
+#define PHYTMSR_TST_JSTA       (1 << 1)
+#define PHYTMSR_UNPLUG         (1 << 0)
+
+/* Cx configuration and FIFO Empty Status register(0x120) */
+#define FOTG210_DCFESR         0x120
+#define DCFESR_FIFO_EMPTY(fifo)        (1 << 8 << (fifo))
+#define DCFESR_CX_EMP          (1 << 5)
+#define DCFESR_CX_CLR          (1 << 3)
+#define DCFESR_CX_STL          (1 << 2)
+#define DCFESR_TST_PKDONE      (1 << 1)
+#define DCFESR_CX_DONE         (1 << 0)
+
+/* Device IDLE Counter Register(0x124) */
+#define FOTG210_DICR           0x124
+
+/* Device Mask of Interrupt Group Register (0x130) */
+#define FOTG210_DMIGR          0x130
+#define DMIGR_MINT_G0          (1 << 0)
+
+/* Device Mask of Interrupt Source Group 0(0x134) */
+#define FOTG210_DMISGR0                0x134
+#define DMISGR0_MCX_COMEND     (1 << 3)
+#define DMISGR0_MCX_OUT_INT    (1 << 2)
+#define DMISGR0_MCX_IN_INT     (1 << 1)
+#define DMISGR0_MCX_SETUP_INT  (1 << 0)
+
+/* Device Mask of Interrupt Source Group 1 Register(0x138)*/
+#define FOTG210_DMISGR1                0x138
+#define DMISGR1_MF3_IN_INT     (1 << 19)
+#define DMISGR1_MF2_IN_INT     (1 << 18)
+#define DMISGR1_MF1_IN_INT     (1 << 17)
+#define DMISGR1_MF0_IN_INT     (1 << 16)
+#define DMISGR1_MF_IN_INT(fifo)        (1 << (16 + (fifo)))
+#define DMISGR1_MF3_SPK_INT    (1 << 7)
+#define DMISGR1_MF3_OUT_INT    (1 << 6)
+#define DMISGR1_MF2_SPK_INT    (1 << 5)
+#define DMISGR1_MF2_OUT_INT    (1 << 4)
+#define DMISGR1_MF1_SPK_INT    (1 << 3)
+#define DMISGR1_MF1_OUT_INT    (1 << 2)
+#define DMISGR1_MF0_SPK_INT    (1 << 1)
+#define DMISGR1_MF0_OUT_INT    (1 << 0)
+#define DMISGR1_MF_OUTSPK_INT(fifo)    (0x3 << (fifo) * 2)
+
+/* Device Mask of Interrupt Source Group 2 Register (0x13C) */
+#define FOTG210_DMISGR2                0x13C
+#define DMISGR2_MDMA_ERROR     (1 << 8)
+#define DMISGR2_MDMA_CMPLT     (1 << 7)
+
+/* Device Interrupt group Register (0x140) */
+#define FOTG210_DIGR           0x140
+#define DIGR_INT_G2            (1 << 2)
+#define DIGR_INT_G1            (1 << 1)
+#define DIGR_INT_G0            (1 << 0)
+
+/* Device Interrupt Source Group 0 Register (0x144) */
+#define FOTG210_DISGR0         0x144
+#define DISGR0_CX_COMABT_INT   (1 << 5)
+#define DISGR0_CX_COMFAIL_INT  (1 << 4)
+#define DISGR0_CX_COMEND_INT   (1 << 3)
+#define DISGR0_CX_OUT_INT      (1 << 2)
+#define DISGR0_CX_IN_INT       (1 << 1)
+#define DISGR0_CX_SETUP_INT    (1 << 0)
+
+/* Device Interrupt Source Group 1 Register (0x148) */
+#define FOTG210_DISGR1         0x148
+#define DISGR1_OUT_INT(fifo)   (1 << ((fifo) * 2))
+#define DISGR1_SPK_INT(fifo)   (1 << 1 << ((fifo) * 2))
+#define DISGR1_IN_INT(fifo)    (1 << 16 << (fifo))
+
+/* Device Interrupt Source Group 2 Register (0x14C) */
+#define FOTG210_DISGR2         0x14C
+#define DISGR2_DMA_ERROR       (1 << 8)
+#define DISGR2_DMA_CMPLT       (1 << 7)
+#define DISGR2_RX0BYTE_INT     (1 << 6)
+#define DISGR2_TX0BYTE_INT     (1 << 5)
+#define DISGR2_ISO_SEQ_ABORT_INT       (1 << 4)
+#define DISGR2_ISO_SEQ_ERR_INT (1 << 3)
+#define DISGR2_RESM_INT                (1 << 2)
+#define DISGR2_SUSP_INT                (1 << 1)
+#define DISGR2_USBRST_INT      (1 << 0)
+
+/* Device Receive Zero-Length Data Packet Register (0x150)*/
+#define FOTG210_RX0BYTE                0x150
+#define RX0BYTE_EP8            (1 << 7)
+#define RX0BYTE_EP7            (1 << 6)
+#define RX0BYTE_EP6            (1 << 5)
+#define RX0BYTE_EP5            (1 << 4)
+#define RX0BYTE_EP4            (1 << 3)
+#define RX0BYTE_EP3            (1 << 2)
+#define RX0BYTE_EP2            (1 << 1)
+#define RX0BYTE_EP1            (1 << 0)
+
+/* Device Transfer Zero-Length Data Packet Register (0x154)*/
+#define FOTG210_TX0BYTE                0x154
+#define TX0BYTE_EP8            (1 << 7)
+#define TX0BYTE_EP7            (1 << 6)
+#define TX0BYTE_EP6            (1 << 5)
+#define TX0BYTE_EP5            (1 << 4)
+#define TX0BYTE_EP4            (1 << 3)
+#define TX0BYTE_EP3            (1 << 2)
+#define TX0BYTE_EP2            (1 << 1)
+#define TX0BYTE_EP1            (1 << 0)
+
+/* Device IN Endpoint x MaxPacketSize Register(0x160+4*(x-1)) */
+#define FOTG210_INEPMPSR(ep)   (0x160 + 4 * ((ep) - 1))
+#define INOUTEPMPSR_MPS(mps)   ((mps) & 0x2FF)
+#define INOUTEPMPSR_STL_EP     (1 << 11)
+#define INOUTEPMPSR_RESET_TSEQ (1 << 12)
+
+/* Device OUT Endpoint x MaxPacketSize Register(0x180+4*(x-1)) */
+#define FOTG210_OUTEPMPSR(ep)  (0x180 + 4 * ((ep) - 1))
+
+/* Device Endpoint 1~4 Map Register (0x1A0) */
+#define FOTG210_EPMAP          0x1A0
+#define EPMAP_FIFONO(ep, dir)          \
+       ((((ep) - 1) << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
+#define EPMAP_FIFONOMSK(ep, dir)       \
+       ((3 << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
+
+/* Device FIFO Map Register (0x1A8) */
+#define FOTG210_FIFOMAP                0x1A8
+#define FIFOMAP_DIROUT(fifo)   (0x0 << 4 << (fifo) * 8)
+#define FIFOMAP_DIRIN(fifo)    (0x1 << 4 << (fifo) * 8)
+#define FIFOMAP_BIDIR(fifo)    (0x2 << 4 << (fifo) * 8)
+#define FIFOMAP_NA(fifo)       (0x3 << 4 << (fifo) * 8)
+#define FIFOMAP_EPNO(ep)       ((ep) << ((ep) - 1) * 8)
+#define FIFOMAP_EPNOMSK(ep)    (0xF << ((ep) - 1) * 8)
+
+/* Device FIFO Configuration Register (0x1AC) */
+#define FOTG210_FIFOCF         0x1AC
+#define FIFOCF_TYPE(type, fifo)        ((type) << (fifo) * 8)
+#define FIFOCF_BLK_SIN(fifo)   (0x0 << (fifo) * 8 << 2)
+#define FIFOCF_BLK_DUB(fifo)   (0x1 << (fifo) * 8 << 2)
+#define FIFOCF_BLK_TRI(fifo)   (0x2 << (fifo) * 8 << 2)
+#define FIFOCF_BLKSZ_512(fifo) (0x0 << (fifo) * 8 << 4)
+#define FIFOCF_BLKSZ_1024(fifo)        (0x1 << (fifo) * 8 << 4)
+#define FIFOCF_FIFO_EN(fifo)   (0x1 << (fifo) * 8 << 5)
+
+/* Device FIFO n Instruction and Byte Count Register (0x1B0+4*n) */
+#define FOTG210_FIBCR(fifo)    (0x1B0 + (fifo) * 4)
+#define FIBCR_BCFX             0x7FF
+#define FIBCR_FFRST            (1 << 12)
+
+/* Device DMA Target FIFO Number Register (0x1C0) */
+#define FOTG210_DMATFNR                0x1C0
+#define DMATFNR_ACC_CXF                (1 << 4)
+#define DMATFNR_ACC_F3         (1 << 3)
+#define DMATFNR_ACC_F2         (1 << 2)
+#define DMATFNR_ACC_F1         (1 << 1)
+#define DMATFNR_ACC_F0         (1 << 0)
+#define DMATFNR_ACC_FN(fifo)   (1 << (fifo))
+#define DMATFNR_DISDMA         0
+
+/* Device DMA Controller Parameter setting 1 Register (0x1C8) */
+#define FOTG210_DMACPSR1       0x1C8
+#define DMACPSR1_DMA_LEN(len)  (((len) & 0xFFFF) << 8)
+#define DMACPSR1_DMA_ABORT     (1 << 3)
+#define DMACPSR1_DMA_TYPE(dir_in)      (((dir_in) ? 1 : 0) << 1)
+#define DMACPSR1_DMA_START     (1 << 0)
+
+/* Device DMA Controller Parameter setting 2 Register (0x1CC) */
+#define FOTG210_DMACPSR2       0x1CC
+
+/* Device DMA Controller Parameter setting 3 Register (0x1D0) */
+#define FOTG210_CXPORT         0x1D0
+
+struct fotg210_request {
+       struct usb_request      req;
+       struct list_head        queue;
+};
+
+struct fotg210_ep {
+       struct usb_ep           ep;
+       struct fotg210_udc      *fotg210;
+
+       struct list_head        queue;
+       unsigned                stall:1;
+       unsigned                wedged:1;
+       unsigned                use_dma:1;
+
+       unsigned char           epnum;
+       unsigned char           type;
+       unsigned char           dir_in;
+       unsigned int            maxp;
+       const struct usb_endpoint_descriptor    *desc;
+};
+
+struct fotg210_udc {
+       spinlock_t              lock; /* protect the struct */
+       void __iomem            *reg;
+
+       unsigned long           irq_trigger;
+
+       struct usb_gadget               gadget;
+       struct usb_gadget_driver        *driver;
+
+       struct fotg210_ep       *ep[FOTG210_MAX_NUM_EP];
+
+       struct usb_request      *ep0_req;       /* for internal request */
+       __le16                  ep0_data;
+       u8                      ep0_dir;        /* 0/0x80  out/in */
+
+       u8                      reenum;         /* if re-enumeration */
+};
+
+#define gadget_to_fotg210(g)   container_of((g), struct fotg210_udc, gadget)
index 9a7ee3347e4d925e55645c2e0f1de7ef03ca312a..f3bb363f1d4acffd04d028c7940f70ff594e4ab5 100644 (file)
@@ -2589,7 +2589,7 @@ static int qe_udc_probe(struct platform_device *ofdev)
        if (ret)
                goto err6;
 
-       dev_set_drvdata(&ofdev->dev, udc);
+       platform_set_drvdata(ofdev, udc);
        dev_info(udc->dev,
                        "%s USB controller initialized as device\n",
                        (udc->soc_type == PORT_QE) ? "QE" : "CPM");
@@ -2640,7 +2640,7 @@ static int qe_udc_resume(struct platform_device *dev)
 
 static int qe_udc_remove(struct platform_device *ofdev)
 {
-       struct qe_udc *udc = dev_get_drvdata(&ofdev->dev);
+       struct qe_udc *udc = platform_get_drvdata(ofdev);
        struct qe_ep *ep;
        unsigned int size;
        DECLARE_COMPLETION(done);
index b8632d40f8bffcd3f653a99c12f6d6b02a8cd11d..c83f3e16532582adcd4e600f0b11aefaa58a5661 100644 (file)
@@ -1347,7 +1347,7 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
 
 static int __exit fusb300_remove(struct platform_device *pdev)
 {
-       struct fusb300 *fusb300 = dev_get_drvdata(&pdev->dev);
+       struct fusb300 *fusb300 = platform_get_drvdata(pdev);
 
        usb_del_gadget_udc(&fusb300->gadget);
        iounmap(fusb300->reg);
@@ -1416,7 +1416,7 @@ static int __init fusb300_probe(struct platform_device *pdev)
 
        spin_lock_init(&fusb300->lock);
 
-       dev_set_drvdata(&pdev->dev, fusb300);
+       platform_set_drvdata(pdev, fusb300);
 
        fusb300->gadget.ops = &fusb300_gadget_ops;
 
index 787a78e92aa2d277a7c0e82661397d0a3843d481..5327c82472eda8034a54ca664392baf5875b0a7a 100644 (file)
 #    define USB_ETH_RNDIS y
 #  endif
 
+#define USBF_ECM_INCLUDED
 #  include "f_ecm.c"
+#define USB_FSUBSET_INCLUDED
 #  include "f_subset.c"
 #  ifdef USB_ETH_RNDIS
+#    define USB_FRNDIS_INCLUDED
 #    include "f_rndis.c"
-#    include "rndis.c"
+#    include "rndis.h"
 #  endif
-#  include "u_ether.c"
+#  include "u_ether.h"
 
-static u8 gfs_hostaddr[ETH_ALEN];
+static u8 gfs_host_mac[ETH_ALEN];
 static struct eth_dev *the_dev;
 #  ifdef CONFIG_USB_FUNCTIONFS_ETH
 static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
@@ -45,7 +48,7 @@ static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
 #else
 #  define the_dev      NULL
 #  define gether_cleanup(dev) do { } while (0)
-#  define gfs_hostaddr NULL
+#  define gfs_host_mac NULL
 struct eth_dev;
 #endif
 
@@ -73,6 +76,8 @@ struct gfs_ffs_obj {
 
 USB_GADGET_COMPOSITE_OPTIONS();
 
+USB_ETHERNET_MODULE_PARAMETERS();
+
 static struct usb_device_descriptor gfs_dev_desc = {
        .bLength                = sizeof gfs_dev_desc,
        .bDescriptorType        = USB_DT_DEVICE,
@@ -350,7 +355,8 @@ static int gfs_bind(struct usb_composite_dev *cdev)
        if (missing_funcs)
                return -ENODEV;
 #if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
-       the_dev = gether_setup(cdev->gadget, gfs_hostaddr);
+       the_dev = gether_setup(cdev->gadget, dev_addr, host_addr, gfs_host_mac,
+                              qmult);
 #endif
        if (IS_ERR(the_dev)) {
                ret = PTR_ERR(the_dev);
@@ -446,7 +452,7 @@ static int gfs_do_config(struct usb_configuration *c)
        }
 
        if (gc->eth) {
-               ret = gc->eth(c, gfs_hostaddr, the_dev);
+               ret = gc->eth(c, gfs_host_mac, the_dev);
                if (unlikely(ret < 0))
                        return ret;
        }
index 51cfe72da5bb88d6fb0a21a1d1d2753fa2e6c115..46ba9838c3a091d084af2472cd39ada76e42ac71 100644 (file)
@@ -1533,7 +1533,7 @@ static const struct usb_gadget_ops m66592_gadget_ops = {
 
 static int __exit m66592_remove(struct platform_device *pdev)
 {
-       struct m66592           *m66592 = dev_get_drvdata(&pdev->dev);
+       struct m66592           *m66592 = platform_get_drvdata(pdev);
 
        usb_del_gadget_udc(&m66592->gadget);
 
@@ -1602,7 +1602,7 @@ static int __init m66592_probe(struct platform_device *pdev)
        m66592->irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
 
        spin_lock_init(&m66592->lock);
-       dev_set_drvdata(&pdev->dev, m66592);
+       platform_set_drvdata(pdev, m66592);
 
        m66592->gadget.ops = &m66592_gadget_ops;
        m66592->gadget.max_speed = USB_SPEED_HIGH;
index 4a45e80c6e384f6adb0fa4cfe6a1efc909419625..032b96a51ce4e06a517f35070542067a002275e2 100644 (file)
@@ -43,16 +43,19 @@ MODULE_LICENSE("GPL");
  */
 #include "f_mass_storage.c"
 
+#define USBF_ECM_INCLUDED
 #include "f_ecm.c"
-#include "f_subset.c"
 #ifdef USB_ETH_RNDIS
+#  define USB_FRNDIS_INCLUDED
 #  include "f_rndis.c"
-#  include "rndis.c"
+#  include "rndis.h"
 #endif
-#include "u_ether.c"
+#include "u_ether.h"
 
 USB_GADGET_COMPOSITE_OPTIONS();
 
+USB_ETHERNET_MODULE_PARAMETERS();
+
 /***************************** Device Descriptor ****************************/
 
 #define MULTI_VENDOR_NUM       0x1d6b  /* Linux Foundation */
@@ -133,7 +136,7 @@ FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
 
 static struct fsg_common fsg_common;
 
-static u8 hostaddr[ETH_ALEN];
+static u8 host_mac[ETH_ALEN];
 
 static struct usb_function_instance *fi_acm;
 static struct eth_dev *the_dev;
@@ -152,7 +155,7 @@ static __init int rndis_do_config(struct usb_configuration *c)
                c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
        }
 
-       ret = rndis_bind_config(c, hostaddr, the_dev);
+       ret = rndis_bind_config(c, host_mac, the_dev);
        if (ret < 0)
                return ret;
 
@@ -216,7 +219,7 @@ static __init int cdc_do_config(struct usb_configuration *c)
                c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
        }
 
-       ret = ecm_bind_config(c, hostaddr, the_dev);
+       ret = ecm_bind_config(c, host_mac, the_dev);
        if (ret < 0)
                return ret;
 
@@ -280,7 +283,8 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
        }
 
        /* set up network link layer */
-       the_dev = gether_setup(cdev->gadget, hostaddr);
+       the_dev = gether_setup(cdev->gadget, dev_addr, host_addr, host_mac,
+                              qmult);
        if (IS_ERR(the_dev))
                return PTR_ERR(the_dev);
 
index 58288e9cf728e2e34a71e67183edeb6cf0532118..07fdb3eaf48a4fd1be386e346e6e9fccc53e78ce 100644 (file)
@@ -1786,8 +1786,6 @@ static int mv_u3d_remove(struct platform_device *dev)
 
        clk_put(u3d->clk);
 
-       platform_set_drvdata(dev, NULL);
-
        kfree(u3d);
 
        return 0;
@@ -1997,7 +1995,6 @@ err_map_cap_regs:
 err_get_cap_regs:
 err_get_clk:
        clk_put(u3d->clk);
-       platform_set_drvdata(dev, NULL);
        kfree(u3d);
 err_alloc_private:
 err_pdata:
@@ -2053,7 +2050,7 @@ static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
 
 static void mv_u3d_shutdown(struct platform_device *dev)
 {
-       struct mv_u3d *u3d = dev_get_drvdata(&dev->dev);
+       struct mv_u3d *u3d = platform_get_drvdata(dev);
        u32 tmp;
 
        tmp = ioread32(&u3d->op_regs->usbcmd);
index 3b02fd4649ce9f8daaae203ec72129f2c4683b1a..81956feca1bd844152ed26067cbc48e40d83bc1f 100644 (file)
 #include <linux/usb/composite.h>
 
 #include "u_ether.h"
+#include "u_ncm.h"
 
 #define DRIVER_DESC            "NCM Gadget"
 
 /*-------------------------------------------------------------------------*/
 
-/*
- * Kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module.  So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
-#include "f_ncm.c"
-#include "u_ether.c"
-
-/*-------------------------------------------------------------------------*/
-
 /* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
  * Instead:  allocate your own, using normal USB-IF procedures.
  */
@@ -54,6 +43,8 @@
 /*-------------------------------------------------------------------------*/
 USB_GADGET_COMPOSITE_OPTIONS();
 
+USB_ETHERNET_MODULE_PARAMETERS();
+
 static struct usb_device_descriptor device_desc = {
        .bLength =              sizeof device_desc,
        .bDescriptorType =      USB_DT_DEVICE,
@@ -111,13 +102,15 @@ static struct usb_gadget_strings *dev_strings[] = {
        NULL,
 };
 
-struct eth_dev *the_dev;
-static u8 hostaddr[ETH_ALEN];
+static struct usb_function_instance *f_ncm_inst;
+static struct usb_function *f_ncm;
 
 /*-------------------------------------------------------------------------*/
 
 static int __init ncm_do_config(struct usb_configuration *c)
 {
+       int status;
+
        /* FIXME alloc iConfiguration string, set it in c->strings */
 
        if (gadget_is_otg(c->cdev->gadget)) {
@@ -125,7 +118,19 @@ static int __init ncm_do_config(struct usb_configuration *c)
                c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
        }
 
-       return ncm_bind_config(c, hostaddr, the_dev);
+       f_ncm = usb_get_function(f_ncm_inst);
+       if (IS_ERR(f_ncm)) {
+               status = PTR_ERR(f_ncm);
+               return status;
+       }
+
+       status = usb_add_function(c, f_ncm);
+       if (status < 0) {
+               usb_put_function(f_ncm);
+               return status;
+       }
+
+       return 0;
 }
 
 static struct usb_configuration ncm_config_driver = {
@@ -141,12 +146,20 @@ static struct usb_configuration ncm_config_driver = {
 static int __init gncm_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget       *gadget = cdev->gadget;
+       struct f_ncm_opts       *ncm_opts;
        int                     status;
 
-       /* set up network link layer */
-       the_dev = gether_setup(cdev->gadget, hostaddr);
-       if (IS_ERR(the_dev))
-               return PTR_ERR(the_dev);
+       f_ncm_inst = usb_get_function_instance("ncm");
+       if (IS_ERR(f_ncm_inst))
+               return PTR_ERR(f_ncm_inst);
+
+       ncm_opts = container_of(f_ncm_inst, struct f_ncm_opts, func_inst);
+
+       gether_set_qmult(ncm_opts->net, qmult);
+       if (!gether_set_host_addr(ncm_opts->net, host_addr))
+               pr_info("using host ethernet address: %s", host_addr);
+       if (!gether_set_dev_addr(ncm_opts->net, dev_addr))
+               pr_info("using self ethernet address: %s", dev_addr);
 
        /* Allocate string descriptor numbers ... note that string
         * contents can be overridden by the composite_dev glue.
@@ -169,13 +182,16 @@ static int __init gncm_bind(struct usb_composite_dev *cdev)
        return 0;
 
 fail:
-       gether_cleanup(the_dev);
+       usb_put_function_instance(f_ncm_inst);
        return status;
 }
 
 static int __exit gncm_unbind(struct usb_composite_dev *cdev)
 {
-       gether_cleanup(the_dev);
+       if (!IS_ERR_OR_NULL(f_ncm))
+               usb_put_function(f_ncm);
+       if (!IS_ERR_OR_NULL(f_ncm_inst))
+               usb_put_function_instance(f_ncm_inst);
        return 0;
 }
 
index 3b344b41a167e2c69ecfb5720a29d74684315428..0a8099a488c4b4dfe77f97f4c7b78e6b17d69353 100644 (file)
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/device.h>
 
 #include "u_serial.h"
 #include "u_ether.h"
 #include "u_phonet.h"
+#include "u_ecm.h"
 #include "gadget_chips.h"
 
 /* Defines */
 #define NOKIA_VERSION_NUM              0x0211
 #define NOKIA_LONG_NAME                        "N900 (PC-Suite Mode)"
 
-/*-------------------------------------------------------------------------*/
-
-/*
- * Kbuild is not very cooperative with respect to linking separately
- * compiled library objects into one module.  So for now we won't use
- * separate compilation ... ensuring init/exit sections work to shrink
- * the runtime footprint, and giving us at least some parts of what
- * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
- */
-#define USBF_OBEX_INCLUDED
-#include "f_ecm.c"
-#include "f_obex.c"
-#include "f_phonet.c"
-#include "u_ether.c"
-
-/*-------------------------------------------------------------------------*/
 USB_GADGET_COMPOSITE_OPTIONS();
 
+USB_ETHERNET_MODULE_PARAMETERS();
+
 #define NOKIA_VENDOR_ID                        0x0421  /* Nokia */
 #define NOKIA_PRODUCT_ID               0x01c8  /* Nokia Gadget */
 
@@ -98,16 +86,15 @@ MODULE_LICENSE("GPL");
 /*-------------------------------------------------------------------------*/
 static struct usb_function *f_acm_cfg1;
 static struct usb_function *f_acm_cfg2;
-static u8 hostaddr[ETH_ALEN];
-static struct eth_dev *the_dev;
-
-enum {
-       TTY_PORT_OBEX0,
-       TTY_PORT_OBEX1,
-       TTY_PORTS_MAX,
-};
+static struct usb_function *f_ecm_cfg1;
+static struct usb_function *f_ecm_cfg2;
+static struct usb_function *f_obex1_cfg1;
+static struct usb_function *f_obex2_cfg1;
+static struct usb_function *f_obex1_cfg2;
+static struct usb_function *f_obex2_cfg2;
+static struct usb_function *f_phonet_cfg1;
+static struct usb_function *f_phonet_cfg2;
 
-static unsigned char tty_lines[TTY_PORTS_MAX];
 
 static struct usb_configuration nokia_config_500ma_driver = {
        .label          = "Bus Powered",
@@ -126,47 +113,114 @@ static struct usb_configuration nokia_config_100ma_driver = {
 };
 
 static struct usb_function_instance *fi_acm;
+static struct usb_function_instance *fi_ecm;
+static struct usb_function_instance *fi_obex1;
+static struct usb_function_instance *fi_obex2;
+static struct usb_function_instance *fi_phonet;
 
 static int __init nokia_bind_config(struct usb_configuration *c)
 {
        struct usb_function *f_acm;
+       struct usb_function *f_phonet = NULL;
+       struct usb_function *f_obex1 = NULL;
+       struct usb_function *f_ecm;
+       struct usb_function *f_obex2 = NULL;
        int status = 0;
+       int obex1_stat = 0;
+       int obex2_stat = 0;
+       int phonet_stat = 0;
+
+       if (!IS_ERR(fi_phonet)) {
+               f_phonet = usb_get_function(fi_phonet);
+               if (IS_ERR(f_phonet))
+                       pr_debug("could not get phonet function\n");
+       }
 
-       status = phonet_bind_config(c);
-       if (status)
-               printk(KERN_DEBUG "could not bind phonet config\n");
-
-       status = obex_bind_config(c, tty_lines[TTY_PORT_OBEX0]);
-       if (status)
-               printk(KERN_DEBUG "could not bind obex config %d\n", 0);
+       if (!IS_ERR(fi_obex1)) {
+               f_obex1 = usb_get_function(fi_obex1);
+               if (IS_ERR(f_obex1))
+                       pr_debug("could not get obex function 0\n");
+       }
 
-       status = obex_bind_config(c, tty_lines[TTY_PORT_OBEX1]);
-       if (status)
-               printk(KERN_DEBUG "could not bind obex config %d\n", 0);
+       if (!IS_ERR(fi_obex2)) {
+               f_obex2 = usb_get_function(fi_obex2);
+               if (IS_ERR(f_obex2))
+                       pr_debug("could not get obex function 1\n");
+       }
 
        f_acm = usb_get_function(fi_acm);
-       if (IS_ERR(f_acm))
-               return PTR_ERR(f_acm);
+       if (IS_ERR(f_acm)) {
+               status = PTR_ERR(f_acm);
+               goto err_get_acm;
+       }
+
+       f_ecm = usb_get_function(fi_ecm);
+       if (IS_ERR(f_ecm)) {
+               status = PTR_ERR(f_ecm);
+               goto err_get_ecm;
+       }
+
+       if (!IS_ERR_OR_NULL(f_phonet)) {
+               phonet_stat = usb_add_function(c, f_phonet);
+               if (phonet_stat)
+                       pr_debug("could not add phonet function\n");
+       }
+
+       if (!IS_ERR_OR_NULL(f_obex1)) {
+               obex1_stat = usb_add_function(c, f_obex1);
+               if (obex1_stat)
+                       pr_debug("could not add obex function 0\n");
+       }
+
+       if (!IS_ERR_OR_NULL(f_obex2)) {
+               obex2_stat = usb_add_function(c, f_obex2);
+               if (obex2_stat)
+                       pr_debug("could not add obex function 1\n");
+       }
 
        status = usb_add_function(c, f_acm);
        if (status)
                goto err_conf;
 
-       status = ecm_bind_config(c, hostaddr, the_dev);
+       status = usb_add_function(c, f_ecm);
        if (status) {
                pr_debug("could not bind ecm config %d\n", status);
                goto err_ecm;
        }
-       if (c == &nokia_config_500ma_driver)
+       if (c == &nokia_config_500ma_driver) {
                f_acm_cfg1 = f_acm;
-       else
+               f_ecm_cfg1 = f_ecm;
+               f_phonet_cfg1 = f_phonet;
+               f_obex1_cfg1 = f_obex1;
+               f_obex2_cfg1 = f_obex2;
+       } else {
                f_acm_cfg2 = f_acm;
+               f_ecm_cfg2 = f_ecm;
+               f_phonet_cfg2 = f_phonet;
+               f_obex1_cfg2 = f_obex1;
+               f_obex2_cfg2 = f_obex2;
+       }
 
        return status;
 err_ecm:
        usb_remove_function(c, f_acm);
 err_conf:
+       if (!obex2_stat)
+               usb_remove_function(c, f_obex2);
+       if (!obex1_stat)
+               usb_remove_function(c, f_obex1);
+       if (!phonet_stat)
+               usb_remove_function(c, f_phonet);
+       usb_put_function(f_ecm);
+err_get_ecm:
        usb_put_function(f_acm);
+err_get_acm:
+       if (!IS_ERR_OR_NULL(f_obex2))
+               usb_put_function(f_obex2);
+       if (!IS_ERR_OR_NULL(f_obex1))
+               usb_put_function(f_obex1);
+       if (!IS_ERR_OR_NULL(f_phonet))
+               usb_put_function(f_phonet);
        return status;
 }
 
@@ -174,23 +228,6 @@ static int __init nokia_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget       *gadget = cdev->gadget;
        int                     status;
-       int                     cur_line;
-
-       status = gphonet_setup(cdev->gadget);
-       if (status < 0)
-               goto err_phonet;
-
-       for (cur_line = 0; cur_line < TTY_PORTS_MAX; cur_line++) {
-               status = gserial_alloc_line(&tty_lines[cur_line]);
-               if (status)
-                       goto err_ether;
-       }
-
-       the_dev = gether_setup(cdev->gadget, hostaddr);
-       if (IS_ERR(the_dev)) {
-               status = PTR_ERR(the_dev);
-               goto err_ether;
-       }
 
        status = usb_string_ids_tab(cdev, strings_dev);
        if (status < 0)
@@ -201,18 +238,40 @@ static int __init nokia_bind(struct usb_composite_dev *cdev)
        nokia_config_500ma_driver.iConfiguration = status;
        nokia_config_100ma_driver.iConfiguration = status;
 
-       if (!gadget_supports_altsettings(gadget))
+       if (!gadget_supports_altsettings(gadget)) {
+               status = -ENODEV;
                goto err_usb;
+       }
+
+       fi_phonet = usb_get_function_instance("phonet");
+       if (IS_ERR(fi_phonet))
+               pr_debug("could not find phonet function\n");
+
+       fi_obex1 = usb_get_function_instance("obex");
+       if (IS_ERR(fi_obex1))
+               pr_debug("could not find obex function 1\n");
+
+       fi_obex2 = usb_get_function_instance("obex");
+       if (IS_ERR(fi_obex2))
+               pr_debug("could not find obex function 2\n");
 
        fi_acm = usb_get_function_instance("acm");
-       if (IS_ERR(fi_acm))
-               goto err_usb;
+       if (IS_ERR(fi_acm)) {
+               status = PTR_ERR(fi_acm);
+               goto err_obex2_inst;
+       }
+
+       fi_ecm = usb_get_function_instance("ecm");
+       if (IS_ERR(fi_ecm)) {
+               status = PTR_ERR(fi_ecm);
+               goto err_acm_inst;
+       }
 
        /* finally register the configuration */
        status = usb_add_config(cdev, &nokia_config_500ma_driver,
                        nokia_bind_config);
        if (status < 0)
-               goto err_acm_inst;
+               goto err_ecm_inst;
 
        status = usb_add_config(cdev, &nokia_config_100ma_driver,
                        nokia_bind_config);
@@ -226,33 +285,55 @@ static int __init nokia_bind(struct usb_composite_dev *cdev)
 
 err_put_cfg1:
        usb_put_function(f_acm_cfg1);
+       if (!IS_ERR_OR_NULL(f_obex1_cfg1))
+               usb_put_function(f_obex1_cfg1);
+       if (!IS_ERR_OR_NULL(f_obex2_cfg1))
+               usb_put_function(f_obex2_cfg1);
+       if (!IS_ERR_OR_NULL(f_phonet_cfg1))
+               usb_put_function(f_phonet_cfg1);
+       usb_put_function(f_ecm_cfg1);
+err_ecm_inst:
+       usb_put_function_instance(fi_ecm);
 err_acm_inst:
        usb_put_function_instance(fi_acm);
+err_obex2_inst:
+       if (!IS_ERR(fi_obex2))
+               usb_put_function_instance(fi_obex2);
+       if (!IS_ERR(fi_obex1))
+               usb_put_function_instance(fi_obex1);
+       if (!IS_ERR(fi_phonet))
+               usb_put_function_instance(fi_phonet);
 err_usb:
-       gether_cleanup(the_dev);
-err_ether:
-       cur_line--;
-       while (cur_line >= 0)
-               gserial_free_line(tty_lines[cur_line--]);
-
-       gphonet_cleanup();
-err_phonet:
        return status;
 }
 
 static int __exit nokia_unbind(struct usb_composite_dev *cdev)
 {
-       int i;
-
+       if (!IS_ERR_OR_NULL(f_obex1_cfg2))
+               usb_put_function(f_obex1_cfg2);
+       if (!IS_ERR_OR_NULL(f_obex2_cfg2))
+               usb_put_function(f_obex2_cfg2);
+       if (!IS_ERR_OR_NULL(f_obex1_cfg1))
+               usb_put_function(f_obex1_cfg1);
+       if (!IS_ERR_OR_NULL(f_obex2_cfg1))
+               usb_put_function(f_obex2_cfg1);
+       if (!IS_ERR_OR_NULL(f_phonet_cfg1))
+               usb_put_function(f_phonet_cfg1);
+       if (!IS_ERR_OR_NULL(f_phonet_cfg2))
+               usb_put_function(f_phonet_cfg2);
        usb_put_function(f_acm_cfg1);
        usb_put_function(f_acm_cfg2);
+       usb_put_function(f_ecm_cfg1);
+       usb_put_function(f_ecm_cfg2);
+
+       usb_put_function_instance(fi_ecm);
+       if (!IS_ERR(fi_obex2))
+               usb_put_function_instance(fi_obex2);
+       if (!IS_ERR(fi_obex1))
+               usb_put_function_instance(fi_obex1);
+       if (!IS_ERR(fi_phonet))
+               usb_put_function_instance(fi_phonet);
        usb_put_function_instance(fi_acm);
-       gphonet_cleanup();
-
-       for (i = 0; i < TTY_PORTS_MAX; i++)
-               gserial_free_line(tty_lines[i]);
-
-       gether_cleanup(the_dev);
 
        return 0;
 }
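
The nokia.c rework above is a straight conversion to the composite function-instance API: each function is looked up by name with usb_get_function_instance(), instantiated per configuration with usb_get_function(), wired in with usb_add_function(), and released in reverse order through usb_put_function()/usb_put_function_instance(). A minimal sketch of that pattern for a single "acm" function (error paths abbreviated, identifiers illustrative only) looks roughly like this:

    /* sketch only: bind one "acm" function into a configuration */
    #include <linux/usb/composite.h>

    static struct usb_function_instance *fi_acm;
    static struct usb_function *f_acm;

    static int example_bind_config(struct usb_configuration *c)
    {
            int status;

            fi_acm = usb_get_function_instance("acm");
            if (IS_ERR(fi_acm))
                    return PTR_ERR(fi_acm);

            f_acm = usb_get_function(fi_acm);
            if (IS_ERR(f_acm)) {
                    status = PTR_ERR(f_acm);
                    goto err_func;
            }

            status = usb_add_function(c, f_acm);
            if (status)
                    goto err_add;
            return 0;

    err_add:
            usb_put_function(f_acm);
    err_func:
            usb_put_function_instance(fi_acm);
            return status;
    }

The optional phonet and obex functions above follow the same shape, except that a failed lookup is only reported with pr_debug() and the function is simply skipped rather than failing the whole bind.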
index 6b4c7d95853f2ee0caead3191b22c0e017153c9b..41cea9566ac8bba0e194dc8b2f55668fa1d1707e 100644 (file)
@@ -2505,7 +2505,6 @@ static int pxa_udc_remove(struct platform_device *_dev)
        usb_put_phy(udc->transceiver);
 
        udc->transceiver = NULL;
-       platform_set_drvdata(_dev, NULL);
        the_controller = NULL;
        clk_put(udc->clk);
        iounmap(udc->regs);
index 7ff7d9cf2061d28ebcd2b4a2935678b0f1460144..c6af649f324008bb31706baecac5d91497a27cd7 100644 (file)
@@ -1469,11 +1469,11 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
        u16 savepipe;
        u16 mask0;
 
+       spin_lock(&r8a66597->lock);
+
        if (r8a66597_is_sudmac(r8a66597))
                r8a66597_sudmac_irq(r8a66597);
 
-       spin_lock(&r8a66597->lock);
-
        intsts0 = r8a66597_read(r8a66597, INTSTS0);
        intenb0 = r8a66597_read(r8a66597, INTENB0);
 
@@ -1822,7 +1822,7 @@ static const struct usb_gadget_ops r8a66597_gadget_ops = {
 
 static int __exit r8a66597_remove(struct platform_device *pdev)
 {
-       struct r8a66597         *r8a66597 = dev_get_drvdata(&pdev->dev);
+       struct r8a66597         *r8a66597 = platform_get_drvdata(pdev);
 
        usb_del_gadget_udc(&r8a66597->gadget);
        del_timer_sync(&r8a66597->timer);
@@ -1909,7 +1909,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
        }
 
        spin_lock_init(&r8a66597->lock);
-       dev_set_drvdata(&pdev->dev, r8a66597);
+       platform_set_drvdata(pdev, r8a66597);
        r8a66597->pdata = pdev->dev.platform_data;
        r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
 
index 1e4cfb05f70b9a0bba6a2fa421ef696d7649e8e8..3e3ea720303043c4df215143eb926f20179bb1d9 100644 (file)
@@ -761,6 +761,7 @@ int rndis_signal_connect(int configNr)
        return rndis_indicate_status_msg(configNr,
                                          RNDIS_STATUS_MEDIA_CONNECT);
 }
+EXPORT_SYMBOL(rndis_signal_connect);
 
 int rndis_signal_disconnect(int configNr)
 {
@@ -769,6 +770,7 @@ int rndis_signal_disconnect(int configNr)
        return rndis_indicate_status_msg(configNr,
                                          RNDIS_STATUS_MEDIA_DISCONNECT);
 }
+EXPORT_SYMBOL(rndis_signal_disconnect);
 
 void rndis_uninit(int configNr)
 {
@@ -783,11 +785,13 @@ void rndis_uninit(int configNr)
        while ((buf = rndis_get_next_response(configNr, &length)))
                rndis_free_response(configNr, buf);
 }
+EXPORT_SYMBOL(rndis_uninit);
 
 void rndis_set_host_mac(int configNr, const u8 *addr)
 {
        rndis_per_dev_params[configNr].host_mac = addr;
 }
+EXPORT_SYMBOL(rndis_set_host_mac);
 
 /*
  * Message Parser
@@ -870,6 +874,7 @@ int rndis_msg_parser(u8 configNr, u8 *buf)
 
        return -ENOTSUPP;
 }
+EXPORT_SYMBOL(rndis_msg_parser);
 
 int rndis_register(void (*resp_avail)(void *v), void *v)
 {
@@ -891,6 +896,7 @@ int rndis_register(void (*resp_avail)(void *v), void *v)
 
        return -ENODEV;
 }
+EXPORT_SYMBOL(rndis_register);
 
 void rndis_deregister(int configNr)
 {
@@ -899,6 +905,7 @@ void rndis_deregister(int configNr)
        if (configNr >= RNDIS_MAX_CONFIGS) return;
        rndis_per_dev_params[configNr].used = 0;
 }
+EXPORT_SYMBOL(rndis_deregister);
 
 int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)
 {
@@ -912,6 +919,7 @@ int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)
 
        return 0;
 }
+EXPORT_SYMBOL(rndis_set_param_dev);
 
 int rndis_set_param_vendor(u8 configNr, u32 vendorID, const char *vendorDescr)
 {
@@ -924,6 +932,7 @@ int rndis_set_param_vendor(u8 configNr, u32 vendorID, const char *vendorDescr)
 
        return 0;
 }
+EXPORT_SYMBOL(rndis_set_param_vendor);
 
 int rndis_set_param_medium(u8 configNr, u32 medium, u32 speed)
 {
@@ -935,6 +944,7 @@ int rndis_set_param_medium(u8 configNr, u32 medium, u32 speed)
 
        return 0;
 }
+EXPORT_SYMBOL(rndis_set_param_medium);
 
 void rndis_add_hdr(struct sk_buff *skb)
 {
@@ -949,6 +959,7 @@ void rndis_add_hdr(struct sk_buff *skb)
        header->DataOffset = cpu_to_le32(36);
        header->DataLength = cpu_to_le32(skb->len - sizeof(*header));
 }
+EXPORT_SYMBOL(rndis_add_hdr);
 
 void rndis_free_response(int configNr, u8 *buf)
 {
@@ -965,6 +976,7 @@ void rndis_free_response(int configNr, u8 *buf)
                }
        }
 }
+EXPORT_SYMBOL(rndis_free_response);
 
 u8 *rndis_get_next_response(int configNr, u32 *length)
 {
@@ -986,6 +998,7 @@ u8 *rndis_get_next_response(int configNr, u32 *length)
 
        return NULL;
 }
+EXPORT_SYMBOL(rndis_get_next_response);
 
 static rndis_resp_t *rndis_add_response(int configNr, u32 length)
 {
@@ -1029,6 +1042,7 @@ int rndis_rm_hdr(struct gether *port,
        skb_queue_tail(list, skb);
        return 0;
 }
+EXPORT_SYMBOL(rndis_rm_hdr);
 
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
 
@@ -1160,6 +1174,7 @@ int rndis_init(void)
 
        return 0;
 }
+module_init(rndis_init);
 
 void rndis_exit(void)
 {
@@ -1173,3 +1188,6 @@ void rndis_exit(void)
        }
 #endif
 }
+module_exit(rndis_exit);
+
+MODULE_LICENSE("GPL");
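
With rndis.c now built as a module of its own (note the module_init()/module_exit() and the EXPORT_SYMBOL()s above), callers reach it only through the exported API. A rough sketch of how a function driver ties into it, using the calls exported in this hunk; the vendor ID, description and helper names below are placeholders, and RNDIS_MEDIUM_802_3 is assumed to come from <linux/rndis.h>:

    /* sketch: binding a network device to the exported RNDIS core */
    #include "rndis.h"

    static u16 example_cdc_filter;          /* storage must outlive the binding */

    static void example_resp_avail(void *ctx)
    {
            /* a response was queued; drain it later via rndis_get_next_response() */
    }

    static int example_rndis_bind(struct net_device *net)
    {
            int config;

            config = rndis_register(example_resp_avail, NULL);
            if (config < 0)
                    return config;          /* -ENODEV once all slots are used */

            rndis_set_param_dev(config, net, &example_cdc_filter);
            rndis_set_param_vendor(config, 0x0525, "example");     /* placeholder IDs */
            rndis_set_param_medium(config, RNDIS_MEDIUM_802_3, 0);
            return config;                  /* undone later with rndis_deregister() */
    }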
index 0647f2f34e898770dac9c6021f0207e2ce6e3c28..0f4abb4c377502fb8384b34439308c02a2b47e42 100644 (file)
@@ -16,6 +16,7 @@
 #define _LINUX_RNDIS_H
 
 #include <linux/rndis.h>
+#include "u_ether.h"
 #include "ndis.h"
 
 #define RNDIS_MAXIMUM_FRAME_SIZE       1518
@@ -216,7 +217,4 @@ int  rndis_signal_disconnect (int configNr);
 int  rndis_state (int configNr);
 extern void rndis_set_host_mac (int configNr, const u8 *addr);
 
-int rndis_init(void);
-void rndis_exit (void);
-
 #endif  /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/u_ecm.h b/drivers/usb/gadget/u_ecm.h
new file mode 100644 (file)
index 0000000..262cc03
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * u_ecm.h
+ *
+ * Utility definitions for the ecm function
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef U_ECM_H
+#define U_ECM_H
+
+#include <linux/usb/composite.h>
+
+struct f_ecm_opts {
+       struct usb_function_instance    func_inst;
+       struct net_device               *net;
+       bool                            bound;
+
+       /*
+        * Read/write access to configfs attributes is handled by configfs.
+        *
+        * This is to protect the data from concurrent access by read/write
+        * and create symlink/remove symlink.
+        */
+       struct mutex                    lock;
+       int                             refcnt;
+};
+
+#endif /* U_ECM_H */
diff --git a/drivers/usb/gadget/u_eem.h b/drivers/usb/gadget/u_eem.h
new file mode 100644 (file)
index 0000000..e3ae978
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * u_eem.h
+ *
+ * Utility definitions for the eem function
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef U_EEM_H
+#define U_EEM_H
+
+#include <linux/usb/composite.h>
+
+struct f_eem_opts {
+       struct usb_function_instance    func_inst;
+       struct net_device               *net;
+       bool                            bound;
+
+       /*
+        * Read/write access to configfs attributes is handled by configfs.
+        *
+        * This is to protect the data from concurrent access by read/write
+        * and create symlink/remove symlink.
+        */
+       struct mutex                    lock;
+       int                             refcnt;
+};
+
+#endif /* U_EEM_H */
index 4b76124ce96b8dfaeab5ab0f0a89bf4f5abf15a6..2aae0d61bb19710c4006b176912eeb25c3875427 100644 (file)
@@ -63,6 +63,8 @@ struct eth_dev {
 
        struct sk_buff_head     rx_frames;
 
+       unsigned                qmult;
+
        unsigned                header_len;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
@@ -76,6 +78,7 @@ struct eth_dev {
 
        bool                    zlp;
        u8                      host_mac[ETH_ALEN];
+       u8                      dev_mac[ETH_ALEN];
 };
 
 /*-------------------------------------------------------------------------*/
@@ -84,12 +87,8 @@ struct eth_dev {
 
 #define DEFAULT_QLEN   2       /* double buffering by default */
 
-static unsigned qmult = 5;
-module_param(qmult, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
-
 /* for dual-speed hardware, use deeper queues at high/super speed */
-static inline int qlen(struct usb_gadget *gadget)
+static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
 {
        if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
                                            gadget->speed == USB_SPEED_SUPER))
@@ -588,7 +587,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
        if (gadget_is_dualspeed(dev->gadget))
                req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
                                     dev->gadget->speed == USB_SPEED_SUPER)
-                       ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
+                       ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
                        : 0;
 
        retval = usb_ep_queue(in, req, GFP_ATOMIC);
@@ -697,16 +696,6 @@ static int eth_stop(struct net_device *net)
 
 /*-------------------------------------------------------------------------*/
 
-/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
-static char *dev_addr;
-module_param(dev_addr, charp, S_IRUGO);
-MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
-
-/* this address is invisible to ifconfig */
-static char *host_addr;
-module_param(host_addr, charp, S_IRUGO);
-MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
-
 static int get_ether_addr(const char *str, u8 *dev_addr)
 {
        if (str) {
@@ -728,6 +717,17 @@ static int get_ether_addr(const char *str, u8 *dev_addr)
        return 1;
 }
 
+static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
+{
+       if (len < 18)
+               return -EINVAL;
+
+       snprintf(str, len, "%02x:%02x:%02x:%02x:%02x:%02x",
+                dev_addr[0], dev_addr[1], dev_addr[2],
+                dev_addr[3], dev_addr[4], dev_addr[5]);
+       return 18;
+}
+
 static const struct net_device_ops eth_netdev_ops = {
        .ndo_open               = eth_open,
        .ndo_stop               = eth_stop,
@@ -755,8 +755,9 @@ static struct device_type gadget_type = {
  *
  * Returns negative errno, or zero on success
  */
-struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
-               const char *netname)
+struct eth_dev *gether_setup_name(struct usb_gadget *g,
+               const char *dev_addr, const char *host_addr,
+               u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
 {
        struct eth_dev          *dev;
        struct net_device       *net;
@@ -777,6 +778,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
 
        /* network device setup */
        dev->net = net;
+       dev->qmult = qmult;
        snprintf(net->name, sizeof(net->name), "%s%%d", netname);
 
        if (get_ether_addr(dev_addr, net->dev_addr))
@@ -806,7 +808,8 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
                INFO(dev, "MAC %pM\n", net->dev_addr);
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);
 
-               /* two kinds of host-initiated state changes:
+               /*
+                * two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
                 *  - tx queueing enabled if open *and* carrier is "on"
                 */
@@ -815,6 +818,186 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
 
        return dev;
 }
+EXPORT_SYMBOL(gether_setup_name);
+
+struct net_device *gether_setup_name_default(const char *netname)
+{
+       struct net_device       *net;
+       struct eth_dev          *dev;
+
+       net = alloc_etherdev(sizeof(*dev));
+       if (!net)
+               return ERR_PTR(-ENOMEM);
+
+       dev = netdev_priv(net);
+       spin_lock_init(&dev->lock);
+       spin_lock_init(&dev->req_lock);
+       INIT_WORK(&dev->work, eth_work);
+       INIT_LIST_HEAD(&dev->tx_reqs);
+       INIT_LIST_HEAD(&dev->rx_reqs);
+
+       skb_queue_head_init(&dev->rx_frames);
+
+       /* network device setup */
+       dev->net = net;
+       dev->qmult = QMULT_DEFAULT;
+       snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+       eth_random_addr(dev->dev_mac);
+       pr_warn("using random %s ethernet address\n", "self");
+       eth_random_addr(dev->host_mac);
+       pr_warn("using random %s ethernet address\n", "host");
+
+       net->netdev_ops = &eth_netdev_ops;
+
+       SET_ETHTOOL_OPS(net, &ops);
+       SET_NETDEV_DEVTYPE(net, &gadget_type);
+
+       return net;
+}
+EXPORT_SYMBOL(gether_setup_name_default);
+
+int gether_register_netdev(struct net_device *net)
+{
+       struct eth_dev *dev;
+       struct usb_gadget *g;
+       struct sockaddr sa;
+       int status;
+
+       if (!net->dev.parent)
+               return -EINVAL;
+       dev = netdev_priv(net);
+       g = dev->gadget;
+       status = register_netdev(net);
+       if (status < 0) {
+               dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+               return status;
+       } else {
+               INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+               /* two kinds of host-initiated state changes:
+                *  - iff DATA transfer is active, carrier is "on"
+                *  - tx queueing enabled if open *and* carrier is "on"
+                */
+               netif_carrier_off(net);
+       }
+       sa.sa_family = net->type;
+       memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
+       rtnl_lock();
+       status = dev_set_mac_address(net, &sa);
+       rtnl_unlock();
+       if (status)
+               pr_warn("cannot set self ethernet address: %d\n", status);
+       else
+               INFO(dev, "MAC %pM\n", dev->dev_mac);
+
+       return status;
+}
+EXPORT_SYMBOL(gether_register_netdev);
+
+void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
+{
+       struct eth_dev *dev;
+
+       dev = netdev_priv(net);
+       dev->gadget = g;
+       SET_NETDEV_DEV(net, &g->dev);
+}
+EXPORT_SYMBOL(gether_set_gadget);
+
+int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
+{
+       struct eth_dev *dev;
+       u8 new_addr[ETH_ALEN];
+
+       dev = netdev_priv(net);
+       if (get_ether_addr(dev_addr, new_addr))
+               return -EINVAL;
+       memcpy(dev->dev_mac, new_addr, ETH_ALEN);
+       return 0;
+}
+EXPORT_SYMBOL(gether_set_dev_addr);
+
+int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
+{
+       struct eth_dev *dev;
+
+       dev = netdev_priv(net);
+       return get_ether_addr_str(dev->dev_mac, dev_addr, len);
+}
+EXPORT_SYMBOL(gether_get_dev_addr);
+
+int gether_set_host_addr(struct net_device *net, const char *host_addr)
+{
+       struct eth_dev *dev;
+       u8 new_addr[ETH_ALEN];
+
+       dev = netdev_priv(net);
+       if (get_ether_addr(host_addr, new_addr))
+               return -EINVAL;
+       memcpy(dev->host_mac, new_addr, ETH_ALEN);
+       return 0;
+}
+EXPORT_SYMBOL(gether_set_host_addr);
+
+int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
+{
+       struct eth_dev *dev;
+
+       dev = netdev_priv(net);
+       return get_ether_addr_str(dev->host_mac, host_addr, len);
+}
+EXPORT_SYMBOL(gether_get_host_addr);
+
+int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
+{
+       struct eth_dev *dev;
+
+       if (len < 13)
+               return -EINVAL;
+
+       dev = netdev_priv(net);
+       snprintf(host_addr, len, "%pm", dev->host_mac);
+
+       return strlen(host_addr);
+}
+EXPORT_SYMBOL(gether_get_host_addr_cdc);
+
+void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
+{
+       struct eth_dev *dev;
+
+       dev = netdev_priv(net);
+       memcpy(host_mac, dev->host_mac, ETH_ALEN);
+}
+EXPORT_SYMBOL(gether_get_host_addr_u8);
+
+void gether_set_qmult(struct net_device *net, unsigned qmult)
+{
+       struct eth_dev *dev;
+
+       dev = netdev_priv(net);
+       dev->qmult = qmult;
+}
+EXPORT_SYMBOL(gether_set_qmult);
+
+unsigned gether_get_qmult(struct net_device *net)
+{
+       struct eth_dev *dev;
+
+       dev = netdev_priv(net);
+       return dev->qmult;
+}
+EXPORT_SYMBOL(gether_get_qmult);
+
+int gether_get_ifname(struct net_device *net, char *name, int len)
+{
+       rtnl_lock();
+       strlcpy(name, netdev_name(net), len);
+       rtnl_unlock();
+       return strlen(name);
+}
+EXPORT_SYMBOL(gether_get_ifname);
 
 /**
  * gether_cleanup - remove Ethernet-over-USB device
@@ -831,6 +1014,7 @@ void gether_cleanup(struct eth_dev *dev)
        flush_work(&dev->work);
        free_netdev(dev->net);
 }
+EXPORT_SYMBOL(gether_cleanup);
 
 /**
  * gether_connect - notify network layer that USB link is active
@@ -873,11 +1057,12 @@ struct net_device *gether_connect(struct gether *link)
        }
 
        if (result == 0)
-               result = alloc_requests(dev, link, qlen(dev->gadget));
+               result = alloc_requests(dev, link, qlen(dev->gadget,
+                                       dev->qmult));
 
        if (result == 0) {
                dev->zlp = link->is_zlp_ok;
-               DBG(dev, "qlen %d\n", qlen(dev->gadget));
+               DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));
 
                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
@@ -910,6 +1095,7 @@ fail0:
                return ERR_PTR(result);
        return dev->net;
 }
+EXPORT_SYMBOL(gether_connect);
 
 /**
  * gether_disconnect - notify network layer that USB link is inactive
@@ -980,3 +1166,7 @@ void gether_disconnect(struct gether *link)
        dev->port_usb = NULL;
        spin_unlock(&dev->lock);
 }
+EXPORT_SYMBOL(gether_disconnect);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Brownell");
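
The u_ether changes split allocation from registration so that configfs can tune the link before it goes live: gether_setup_name_default() allocates the netdev with random self/host MACs and QMULT_DEFAULT, the gether_set_*/gether_get_* helpers adjust or expose its attributes, gether_set_gadget() attaches it to a gadget, and gether_register_netdev() finally registers it. A sketch of that flow, with an illustrative caller and MAC address:

    /* sketch: two-stage u_ether setup (names and addresses are examples only) */
    #include "u_ether.h"

    static int example_ether_link(struct usb_gadget *gadget)
    {
            struct net_device *net;
            int ret;

            net = gether_setup_name_default("usb");   /* random MACs, QMULT_DEFAULT */
            if (IS_ERR(net))
                    return PTR_ERR(net);

            gether_set_qmult(net, 10);                /* optional tweaks before registration */
            ret = gether_set_dev_addr(net, "aa:bb:cc:dd:ee:01");
            if (ret)
                    goto err;

            gether_set_gadget(net, gadget);
            ret = gether_register_netdev(net);
            if (ret)
                    goto err;
            return 0;
    err:
            free_netdev(net);
            return ret;
    }

Once registered, such a link is still torn down with gether_cleanup(); callers that only hold the net_device typically reach the eth_dev via netdev_priv(net).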
index 02522338a7081abad0f92c3506a84724f61df23b..fb23d1fde8eb52701cab66b6b1103423d63e13de 100644 (file)
 
 #include "gadget_chips.h"
 
+#define QMULT_DEFAULT 5
+
+/*
+ * dev_addr: initial value
+ * changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx"
+ * host_addr: this address is invisible to ifconfig
+ */
+#define USB_ETHERNET_MODULE_PARAMETERS() \
+       static unsigned qmult = QMULT_DEFAULT;                          \
+       module_param(qmult, uint, S_IRUGO|S_IWUSR);                     \
+       MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");\
+                                                                       \
+       static char *dev_addr;                                          \
+       module_param(dev_addr, charp, S_IRUGO);                         \
+       MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");          \
+                                                                       \
+       static char *host_addr;                                         \
+       module_param(host_addr, charp, S_IRUGO);                        \
+       MODULE_PARM_DESC(host_addr, "Host Ethernet Address")
+
 struct eth_dev;
 
 /*
@@ -71,8 +91,9 @@ struct gether {
                        |USB_CDC_PACKET_TYPE_DIRECTED)
 
 /* variant of gether_setup that allows customizing network device name */
-struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
-               const char *netname);
+struct eth_dev *gether_setup_name(struct usb_gadget *g,
+               const char *dev_addr, const char *host_addr,
+               u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname);
 
 /* netdev setup/teardown as directed by the gadget driver */
 /* gether_setup - initialize one ethernet-over-usb link
@@ -88,11 +109,145 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
  * Returns negative errno, or zero on success
  */
 static inline struct eth_dev *gether_setup(struct usb_gadget *g,
-               u8 ethaddr[ETH_ALEN])
+               const char *dev_addr, const char *host_addr,
+               u8 ethaddr[ETH_ALEN], unsigned qmult)
 {
-       return gether_setup_name(g, ethaddr, "usb");
+       return gether_setup_name(g, dev_addr, host_addr, ethaddr, qmult, "usb");
 }
 
+/*
+ * variant of gether_setup_default that allows customizing
+ * network device name
+ */
+struct net_device *gether_setup_name_default(const char *netname);
+
+/*
+ * gether_register_netdev - register the net device
+ * @net: net device to register
+ *
+ * Registers the net device associated with this ethernet-over-usb link
+ *
+ */
+int gether_register_netdev(struct net_device *net);
+
+/* gether_setup_default - initialize one ethernet-over-usb link
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework.  The link layer addresses
+ * are set to random values.
+ *
+ * Returns negative errno, or zero on success
+ */
+static inline struct net_device *gether_setup_default(void)
+{
+       return gether_setup_name_default("usb");
+}
+
+/**
+ * gether_set_gadget - initialize one ethernet-over-usb link with a gadget
+ * @net: device representing this link
+ * @g: the gadget to initialize with
+ *
+ * This associates one ethernet-over-usb link with a gadget.
+ */
+void gether_set_gadget(struct net_device *net, struct usb_gadget *g);
+
+/**
+ * gether_set_dev_addr - initialize an ethernet-over-usb link with eth address
+ * @net: device representing this link
+ * @dev_addr: eth address of this device
+ *
+ * This sets the device-side Ethernet address of this ethernet-over-usb link
+ * if dev_addr is correct.
+ * Returns negative errno if the new address is incorrect.
+ */
+int gether_set_dev_addr(struct net_device *net, const char *dev_addr);
+
+/**
+ * gether_get_dev_addr - get an ethernet-over-usb link eth address
+ * @net: device representing this link
+ * @dev_addr: place to store device's eth address
+ * @len: length of the @dev_addr buffer
+ *
+ * This gets the device-side Ethernet address of this ethernet-over-usb link.
+ * Returns zero on success, else negative errno.
+ */
+int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len);
+
+/**
+ * gether_set_host_addr - initialize an ethernet-over-usb link with host address
+ * @net: device representing this link
+ * @host_addr: eth address of the host
+ *
+ * This sets the host-side Ethernet address of this ethernet-over-usb link
+ * if host_addr is correct.
+ * Returns negative errno if the new address is incorrect.
+ */
+int gether_set_host_addr(struct net_device *net, const char *host_addr);
+
+/**
+ * gether_get_host_addr - get an ethernet-over-usb link host address
+ * @net: device representing this link
+ * @host_addr: place to store eth address of the host
+ * @len: length of the @host_addr buffer
+ *
+ * This gets the host-side Ethernet address of this ethernet-over-usb link.
+ * Returns zero on success, else negative errno.
+ */
+int gether_get_host_addr(struct net_device *net, char *host_addr, int len);
+
+/**
+ * gether_get_host_addr_cdc - get an ethernet-over-usb link host address
+ * @net: device representing this link
+ * @host_addr: place to store eth address of the host
+ * @len: length of the @host_addr buffer
+ *
+ * This gets the CDC formatted host-side Ethernet address of this
+ * ethernet-over-usb link.
+ * Returns zero on success, else negative errno.
+ */
+int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len);
+
+/**
+ * gether_get_host_addr_u8 - get an ethernet-over-usb link host address
+ * @net: device representing this link
+ * @host_mac: place to store the eth address of the host
+ *
+ * This gets the binary formatted host-side Ethernet address of this
+ * ethernet-over-usb link.
+ */
+void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN]);
+
+/**
+ * gether_set_qmult - initialize an ethernet-over-usb link with a multiplier
+ * @net: device representing this link
+ * @qmult: queue multiplier
+ *
+ * This sets the queue length multiplier of this ethernet-over-usb link.
+ * For higher speeds use longer queues.
+ */
+void gether_set_qmult(struct net_device *net, unsigned qmult);
+
+/**
+ * gether_get_qmult - get an ethernet-over-usb link multiplier
+ * @net: device representing this link
+ *
+ * This gets the queue length multiplier of this ethernet-over-usb link.
+ */
+unsigned gether_get_qmult(struct net_device *net);
+
+/**
+ * gether_get_ifname - get an ethernet-over-usb link interface name
+ * @net: device representing this link
+ * @name: place to store the interface name
+ * @len: length of the @name buffer
+ *
+ * This gets the interface name of this ethernet-over-usb link.
+ * Returns zero on success, else negative errno.
+ */
+int gether_get_ifname(struct net_device *net, char *name, int len);
+
 void gether_cleanup(struct eth_dev *dev);
 
 /* connect/disconnect is handled by individual functions */
@@ -117,9 +272,6 @@ int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
                struct eth_dev *dev);
 int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
                struct eth_dev *dev);
-int ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
-               struct eth_dev *dev);
-int eem_bind_config(struct usb_configuration *c, struct eth_dev *dev);
 
 #ifdef USB_ETH_RNDIS
 
diff --git a/drivers/usb/gadget/u_ether_configfs.h b/drivers/usb/gadget/u_ether_configfs.h
new file mode 100644 (file)
index 0000000..bcbd301
--- /dev/null
@@ -0,0 +1,164 @@
+/*
+ * u_ether_configfs.h
+ *
+ * Utility definitions for configfs support in USB Ethernet functions
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __U_ETHER_CONFIGFS_H
+#define __U_ETHER_CONFIGFS_H
+
+#define USB_ETHERNET_CONFIGFS_ITEM(_f_)                                        \
+       CONFIGFS_ATTR_STRUCT(f_##_f_##_opts);                           \
+       CONFIGFS_ATTR_OPS(f_##_f_##_opts);                              \
+                                                                       \
+       static void _f_##_attr_release(struct config_item *item)        \
+       {                                                               \
+               struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);  \
+                                                                       \
+               usb_put_function_instance(&opts->func_inst);            \
+       }                                                               \
+                                                                       \
+       static struct configfs_item_operations _f_##_item_ops = {       \
+               .release        = _f_##_attr_release,                   \
+               .show_attribute = f_##_f_##_opts_attr_show,             \
+               .store_attribute = f_##_f_##_opts_attr_store,           \
+       }
+
+#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(_f_)                  \
+       static ssize_t _f_##_opts_dev_addr_show(struct f_##_f_##_opts *opts, \
+                                               char *page)             \
+       {                                                               \
+               int result;                                             \
+                                                                       \
+               mutex_lock(&opts->lock);                                \
+               result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
+               mutex_unlock(&opts->lock);                              \
+                                                                       \
+               return result;                                          \
+       }                                                               \
+                                                                       \
+       static ssize_t _f_##_opts_dev_addr_store(struct f_##_f_##_opts *opts, \
+                                                const char *page, size_t len)\
+       {                                                               \
+               int ret;                                                \
+                                                                       \
+               mutex_lock(&opts->lock);                                \
+               if (opts->refcnt) {                                     \
+                       mutex_unlock(&opts->lock);                      \
+                       return -EBUSY;                                  \
+               }                                                       \
+                                                                       \
+               ret = gether_set_dev_addr(opts->net, page);             \
+               mutex_unlock(&opts->lock);                              \
+               if (!ret)                                               \
+                       ret = len;                                      \
+               return ret;                                             \
+       }                                                               \
+                                                                       \
+       static struct f_##_f_##_opts_attribute f_##_f_##_opts_dev_addr = \
+               __CONFIGFS_ATTR(dev_addr, S_IRUGO | S_IWUSR,            \
+                               _f_##_opts_dev_addr_show,               \
+                               _f_##_opts_dev_addr_store)
+
+#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(_f_)                 \
+       static ssize_t _f_##_opts_host_addr_show(struct f_##_f_##_opts *opts, \
+                                                char *page)            \
+       {                                                               \
+               int result;                                             \
+                                                                       \
+               mutex_lock(&opts->lock);                                \
+               result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
+               mutex_unlock(&opts->lock);                              \
+                                                                       \
+               return result;                                          \
+       }                                                               \
+                                                                       \
+       static ssize_t _f_##_opts_host_addr_store(struct f_##_f_##_opts *opts, \
+                                                 const char *page, size_t len)\
+       {                                                               \
+               int ret;                                                \
+                                                                       \
+               mutex_lock(&opts->lock);                                \
+               if (opts->refcnt) {                                     \
+                       mutex_unlock(&opts->lock);                      \
+                       return -EBUSY;                                  \
+               }                                                       \
+                                                                       \
+               ret = gether_set_host_addr(opts->net, page);            \
+               mutex_unlock(&opts->lock);                              \
+               if (!ret)                                               \
+                       ret = len;                                      \
+               return ret;                                             \
+       }                                                               \
+                                                                       \
+       static struct f_##_f_##_opts_attribute f_##_f_##_opts_host_addr = \
+               __CONFIGFS_ATTR(host_addr, S_IRUGO | S_IWUSR,           \
+                               _f_##_opts_host_addr_show,              \
+                               _f_##_opts_host_addr_store)
+
+#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(_f_)                     \
+       static ssize_t _f_##_opts_qmult_show(struct f_##_f_##_opts *opts, \
+                                            char *page)                \
+       {                                                               \
+               unsigned qmult;                                         \
+                                                                       \
+               mutex_lock(&opts->lock);                                \
+               qmult = gether_get_qmult(opts->net);                    \
+               mutex_unlock(&opts->lock);                              \
+               return sprintf(page, "%d", qmult);                      \
+       }                                                               \
+                                                                       \
+       static ssize_t _f_##_opts_qmult_store(struct f_##_f_##_opts *opts, \
+                                             const char *page, size_t len)\
+       {                                                               \
+               u8 val;                                                 \
+               int ret;                                                \
+                                                                       \
+               mutex_lock(&opts->lock);                                \
+               if (opts->refcnt) {                                     \
+                       ret = -EBUSY;                                   \
+                       goto out;                                       \
+               }                                                       \
+                                                                       \
+               ret = kstrtou8(page, 0, &val);                          \
+               if (ret)                                                \
+                       goto out;                                       \
+                                                                       \
+               gether_set_qmult(opts->net, val);                       \
+               ret = len;                                              \
+out:                                                                   \
+               mutex_unlock(&opts->lock);                              \
+               return ret;                                             \
+       }                                                               \
+                                                                       \
+       static struct f_##_f_##_opts_attribute f_##_f_##_opts_qmult =   \
+               __CONFIGFS_ATTR(qmult, S_IRUGO | S_IWUSR,               \
+                               _f_##_opts_qmult_show,          \
+                               _f_##_opts_qmult_store)
+
+#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(_f_)                    \
+       static ssize_t _f_##_opts_ifname_show(struct f_##_f_##_opts *opts, \
+                                             char *page)               \
+       {                                                               \
+               int ret;                                                \
+                                                                       \
+               mutex_lock(&opts->lock);                                \
+               ret = gether_get_ifname(opts->net, page, PAGE_SIZE);    \
+               mutex_unlock(&opts->lock);                              \
+                                                                       \
+               return ret;                                             \
+       }                                                               \
+                                                                       \
+       static struct f_##_f_##_opts_attribute f_##_f_##_opts_ifname =  \
+               __CONFIGFS_ATTR_RO(ifname, _f_##_opts_ifname_show)
+
+#endif /* __U_ETHER_CONFIGFS_H */
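
Each Ethernet function is expected to instantiate these macros for its own *_opts type: they expand into the show/store helpers, the struct f_<f>_opts_attribute definitions and the configfs item ops, leaving the function to supply a to_f_<f>_opts() container_of helper and to collect the generated attributes. A sketch for an "ecm" user, assuming usb_function_instance embeds its config_group as func_inst.group:

    /* sketch: instantiating the u_ether configfs helpers for an "ecm" function */
    #include "u_ecm.h"
    #include "u_ether_configfs.h"

    static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item)
    {
            return container_of(to_config_group(item), struct f_ecm_opts,
                                func_inst.group);
    }

    /* expands into the f_ecm_opts attribute plumbing and ecm_item_ops */
    USB_ETHERNET_CONFIGFS_ITEM(ecm);
    USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ecm);
    USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ecm);
    USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ecm);
    USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ecm);

    static struct configfs_attribute *ecm_attrs[] = {
            &f_ecm_opts_dev_addr.attr,
            &f_ecm_opts_host_addr.attr,
            &f_ecm_opts_qmult.attr,
            &f_ecm_opts_ifname.attr,
            NULL,
    };

    static struct config_item_type ecm_func_type = {
            .ct_item_ops    = &ecm_item_ops,
            .ct_attrs       = ecm_attrs,
            .ct_owner       = THIS_MODULE,
    };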
diff --git a/drivers/usb/gadget/u_gether.h b/drivers/usb/gadget/u_gether.h
new file mode 100644 (file)
index 0000000..d407842
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * u_gether.h
+ *
+ * Utility definitions for the subset function
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef U_GETHER_H
+#define U_GETHER_H
+
+#include <linux/usb/composite.h>
+
+struct f_gether_opts {
+       struct usb_function_instance    func_inst;
+       struct net_device               *net;
+       bool                            bound;
+
+       /*
+        * Read/write access to configfs attributes is handled by configfs.
+        *
+        * This is to protect the data from concurrent access by read/write
+        * and create symlink/remove symlink.
+        */
+       struct mutex                    lock;
+       int                             refcnt;
+};
+
+#endif /* U_GETHER_H */
diff --git a/drivers/usb/gadget/u_ncm.h b/drivers/usb/gadget/u_ncm.h
new file mode 100644 (file)
index 0000000..ce0f3a7
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * u_ncm.h
+ *
+ * Utility definitions for the ncm function
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef U_NCM_H
+#define U_NCM_H
+
+#include <linux/usb/composite.h>
+
+struct f_ncm_opts {
+       struct usb_function_instance    func_inst;
+       struct net_device               *net;
+       bool                            bound;
+
+       /*
+        * Read/write access to configfs attributes is handled by configfs.
+        *
+        * This is to protect the data from concurrent access by read/write
+        * and create symlink/remove symlink.
+        */
+       struct mutex                    lock;
+       int                             refcnt;
+};
+
+#endif /* U_NCM_H */
index 09a75259b6cd4f790bb62954f34880bb180050be..98ced18779eaf44c693ba7841710cd3169bd5682 100644 (file)
 #include <linux/usb/composite.h>
 #include <linux/usb/cdc.h>
 
-int gphonet_setup(struct usb_gadget *gadget);
-int phonet_bind_config(struct usb_configuration *c);
-void gphonet_cleanup(void);
+struct f_phonet_opts {
+       struct usb_function_instance func_inst;
+       bool bound;
+       struct net_device *net;
+};
+
+struct net_device *gphonet_setup_default(void);
+void gphonet_set_gadget(struct net_device *net, struct usb_gadget *g);
+int gphonet_register_netdev(struct net_device *net);
+int phonet_bind_config(struct usb_configuration *c, struct net_device *dev);
+void gphonet_cleanup(struct net_device *dev);
 
 #endif /* __U_PHONET_H */
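
u_phonet.h now mirrors the u_ether split: the single gphonet_setup()/gphonet_cleanup() pair gives way to separate allocation, gadget attachment and registration steps, and phonet_bind_config() takes the net_device explicitly. A minimal sketch of the new flow (caller name is illustrative):

    /* sketch: setting up the phonet link with the reworked API */
    #include "u_phonet.h"

    static int example_phonet_link(struct usb_gadget *gadget)
    {
            struct net_device *net;
            int ret;

            net = gphonet_setup_default();
            if (IS_ERR(net))
                    return PTR_ERR(net);

            gphonet_set_gadget(net, gadget);
            ret = gphonet_register_netdev(net);
            if (ret) {
                    free_netdev(net);       /* not yet registered, so free directly */
                    return ret;
            }
            /* later, once registered, tear down with gphonet_cleanup(net) */
            return 0;
    }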
diff --git a/drivers/usb/gadget/u_rndis.h b/drivers/usb/gadget/u_rndis.h
new file mode 100644 (file)
index 0000000..c62ba82
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * u_rndis.h
+ *
+ * Utility definitions for the subset function
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef U_RNDIS_H
+#define U_RNDIS_H
+
+#include <linux/usb/composite.h>
+
+struct f_rndis_opts {
+       struct usb_function_instance    func_inst;
+       u32                             vendor_id;
+       const char                      *manufacturer;
+       struct net_device               *net;
+       bool                            bound;
+       bool                            borrowed_net;
+
+       /*
+        * Read/write access to configfs attributes is handled by configfs.
+        *
+        * This is to protect the data from concurrent access by read/write
+        * and create symlink/remove symlink.
+        */
+       struct mutex                    lock;
+       int                             refcnt;
+};
+
+void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net);
+
+#endif /* U_RNDIS_H */
index 7ce27e35550b7e50c887764b53f6907e4fc5864d..e6170478ea9f737b0a6eeec9a1a9524a5307db62 100644 (file)
@@ -103,10 +103,26 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
        spin_unlock_irqrestore(&queue->irqlock, flags);
 }
 
+static void uvc_wait_prepare(struct vb2_queue *vq)
+{
+       struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+
+       mutex_unlock(&queue->mutex);
+}
+
+static void uvc_wait_finish(struct vb2_queue *vq)
+{
+       struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+
+       mutex_lock(&queue->mutex);
+}
+
 static struct vb2_ops uvc_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
+       .wait_prepare = uvc_wait_prepare,
+       .wait_finish = uvc_wait_finish,
 };
 
 static int uvc_queue_init(struct uvc_video_queue *queue,
index 344d5e2f87d73a7234452a2b698f63b4959494e5..7d0aa5f58f7a29d64ce9b926acaeab09df637b7d 100644 (file)
@@ -17,7 +17,6 @@ config USB_C67X00_HCD
 
 config USB_XHCI_HCD
        tristate "xHCI HCD (USB 3.0) support"
-       depends on USB_ARCH_HAS_XHCI
        ---help---
          The eXtensible Host Controller Interface (xHCI) is standard for USB 3.0
          "SuperSpeed" host controller hardware.
@@ -43,7 +42,6 @@ endif # USB_XHCI_HCD
 
 config USB_EHCI_HCD
        tristate "EHCI HCD (USB 2.0) support"
-       depends on USB_ARCH_HAS_EHCI
        ---help---
          The Enhanced Host Controller Interface (EHCI) is standard for USB 2.0
          "high speed" (480 Mbit/sec, 60 Mbyte/sec) host controller hardware.
@@ -200,7 +198,7 @@ config USB_EHCI_MSM
          has an external PHY.
 
 config USB_EHCI_TEGRA
-       boolean "NVIDIA Tegra HCD support"
+       tristate "NVIDIA Tegra HCD support"
        depends on ARCH_TEGRA
        select USB_EHCI_ROOT_HUB_TT
        select USB_PHY
@@ -345,9 +343,19 @@ config USB_ISP1362_HCD
          To compile this driver as a module, choose M here: the
          module will be called isp1362-hcd.
 
+config USB_FUSBH200_HCD
+       tristate "FUSBH200 HCD support"
+       depends on USB
+       default N
+       ---help---
+       Faraday FUSBH200 is designed to meet USB2.0 EHCI specification
+       with minor modification.
+
+       To compile this driver as a module, choose M here: the
+       module will be called fusbh200-hcd.
+
 config USB_OHCI_HCD
-       tristate "OHCI HCD support"
-       depends on USB_ARCH_HAS_OHCI
+       tristate "OHCI HCD (USB 1.1) support"
        select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
        depends on USB_ISP1301 || !ARCH_LPC32XX
        ---help---
@@ -415,8 +423,8 @@ config USB_OHCI_HCD_PPC_OF
        default USB_OHCI_HCD_PPC_OF_BE || USB_OHCI_HCD_PPC_OF_LE
 
 config USB_OHCI_HCD_PCI
-       bool "OHCI support for PCI-bus USB controllers"
-       depends on PCI && (STB03xxx || PPC_MPC52xx || USB_OHCI_HCD_PPC_OF)
+       tristate "OHCI support for PCI-bus USB controllers"
+       depends on PCI
        default y
        select USB_OHCI_LITTLE_ENDIAN
        ---help---
@@ -470,7 +478,7 @@ config USB_CNS3XXX_OHCI
          It is needed for low-speed USB 1.0 device support.
 
 config USB_OHCI_HCD_PLATFORM
-       bool "Generic OHCI driver for a platform device"
+       tristate "Generic OHCI driver for a platform device"
        default n
        ---help---
          Adds an OHCI host driver for a generic platform device, which
index 4fb73c156d72ac998fc8f6f60684bc212d26919d..bea71127b15f5f13d2423e44d41224ca11264499 100644 (file)
@@ -33,11 +33,16 @@ obj-$(CONFIG_USB_EHCI_HCD_SPEAR)    += ehci-spear.o
 obj-$(CONFIG_USB_EHCI_S5P)     += ehci-s5p.o
 obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
 obj-$(CONFIG_USB_EHCI_MSM)     += ehci-msm.o
+obj-$(CONFIG_USB_EHCI_TEGRA)   += ehci-tegra.o
 
 obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
 obj-$(CONFIG_USB_ISP116X_HCD)  += isp116x-hcd.o
 obj-$(CONFIG_USB_ISP1362_HCD)  += isp1362-hcd.o
+
 obj-$(CONFIG_USB_OHCI_HCD)     += ohci-hcd.o
+obj-$(CONFIG_USB_OHCI_HCD_PCI) += ohci-pci.o
+obj-$(CONFIG_USB_OHCI_HCD_PLATFORM)    += ohci-platform.o
+
 obj-$(CONFIG_USB_UHCI_HCD)     += uhci-hcd.o
 obj-$(CONFIG_USB_FHCI_HCD)     += fhci.o
 obj-$(CONFIG_USB_XHCI_HCD)     += xhci-hcd.o
@@ -52,3 +57,4 @@ obj-$(CONFIG_USB_FSL_MPH_DR_OF)       += fsl-mph-dr-of.o
 obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
 obj-$(CONFIG_USB_HCD_BCMA)     += bcma-hcd.o
 obj-$(CONFIG_USB_HCD_SSB)      += ssb-hcd.o
+obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o
index 02f4611faa62c571a3a59ff4b33a35ead1966caf..3b645ff46f7b9f8df67d2d793c1758be26173b21 100644 (file)
@@ -37,15 +37,15 @@ static int clocked;
 
 static void atmel_start_clock(void)
 {
-       clk_enable(iclk);
-       clk_enable(fclk);
+       clk_prepare_enable(iclk);
+       clk_prepare_enable(fclk);
        clocked = 1;
 }
 
 static void atmel_stop_clock(void)
 {
-       clk_disable(fclk);
-       clk_disable(iclk);
+       clk_disable_unprepare(fclk);
+       clk_disable_unprepare(iclk);
        clocked = 0;
 }
 
index 3be3df233a0e0b73b8bdc198d5aae050b6e520be..bd831ec06dcd8b8192749bc31d4afd137ab361bf 100644 (file)
@@ -732,6 +732,7 @@ static struct platform_driver ehci_fsl_driver = {
        .shutdown = usb_hcd_platform_shutdown,
        .driver = {
                .name = "fsl-ehci",
+               .owner  = THIS_MODULE,
                .pm = EHCI_FSL_PM_OPS,
        },
 };
index 5d75de9729b668e22afe4b7e57fe1050c3c320cc..a77bd8dc33f42fe6662cd88b9b2fbb8f879a6aa2 100644 (file)
@@ -153,9 +153,7 @@ err_irq:
 
 static int ehci_hcd_grlib_remove(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
-
-       dev_set_drvdata(&op->dev, NULL);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
        dev_dbg(&op->dev, "stopping GRLIB GRUSBHC EHCI USB Controller\n");
 
@@ -171,7 +169,7 @@ static int ehci_hcd_grlib_remove(struct platform_device *op)
 
 static void ehci_hcd_grlib_shutdown(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
        if (hcd->driver->shutdown)
                hcd->driver->shutdown(hcd);
index 246e124e6ac55c3dc66ba822a9b3f8ba5bebdd6a..7abf1ce3a670ffeed76be2bcf1da9fccfcf63399 100644 (file)
@@ -139,7 +139,7 @@ static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
 /*-------------------------------------------------------------------------*/
 
 /*
- * handshake - spin reading hc until handshake completes or fails
+ * ehci_handshake - spin reading hc until handshake completes or fails
  * @ptr: address of hc register to be read
  * @mask: bits to look at in result of read
  * @done: value of those bits when handshake succeeds
@@ -155,8 +155,8 @@ static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
  * before driver shutdown. But it also seems to be caused by bugs in cardbus
  * bridge shutdown:  shutting down the bridge before the devices using it.
  */
-static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
-                     u32 mask, u32 done, int usec)
+int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
+                  u32 mask, u32 done, int usec)
 {
        u32     result;
 
@@ -172,6 +172,7 @@ static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
        } while (usec > 0);
        return -ETIMEDOUT;
 }
+EXPORT_SYMBOL_GPL(ehci_handshake);
 
 /* check TDI/ARC silicon is in host mode */
 static int tdi_in_host_mode (struct ehci_hcd *ehci)
@@ -212,7 +213,7 @@ static int ehci_halt (struct ehci_hcd *ehci)
        spin_unlock_irq(&ehci->lock);
        synchronize_irq(ehci_to_hcd(ehci)->irq);
 
-       return handshake(ehci, &ehci->regs->status,
+       return ehci_handshake(ehci, &ehci->regs->status,
                          STS_HALT, STS_HALT, 16 * 125);
 }
 
@@ -251,7 +252,7 @@ static int ehci_reset (struct ehci_hcd *ehci)
        ehci_writel(ehci, command, &ehci->regs->command);
        ehci->rh_state = EHCI_RH_HALTED;
        ehci->next_statechange = jiffies;
-       retval = handshake (ehci, &ehci->regs->command,
+       retval = ehci_handshake(ehci, &ehci->regs->command,
                            CMD_RESET, 0, 250 * 1000);
 
        if (ehci->has_hostpc) {
@@ -286,7 +287,8 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
 
        /* wait for any schedule enables/disables to take effect */
        temp = (ehci->command << 10) & (STS_ASS | STS_PSS);
-       handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp, 16 * 125);
+       ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp,
+                       16 * 125);
 
        /* then disable anything that's still active */
        spin_lock_irq(&ehci->lock);
@@ -295,7 +297,8 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
        spin_unlock_irq(&ehci->lock);
 
        /* hardware can take 16 microframes to turn off ... */
-       handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0, 16 * 125);
+       ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0,
+                       16 * 125);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1266,11 +1269,6 @@ MODULE_LICENSE ("GPL");
 #define        PLATFORM_DRIVER         ehci_hcd_msp_driver
 #endif
 
-#ifdef CONFIG_USB_EHCI_TEGRA
-#include "ehci-tegra.c"
-#define PLATFORM_DRIVER                tegra_ehci_driver
-#endif
-
 #ifdef CONFIG_SPARC_LEON
 #include "ehci-grlib.c"
 #define PLATFORM_DRIVER                ehci_grlib_driver
index 9ab4a4d9768a5a3ed3fa791746e01977445ba840..2b702772d04d3353cc570c340f04c9701fa11210 100644 (file)
@@ -42,6 +42,12 @@ static int ehci_hub_control(
        u16             wLength
 );
 
+static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
+{
+       return !udev->maxchild && udev->persist_enabled &&
+               udev->bus->root_hub->speed < USB_SPEED_HIGH;
+}
+
 /* After a power loss, ports that were owned by the companion must be
  * reset so that the companion can still own them.
  */
@@ -56,6 +62,16 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
        if (!ehci->owned_ports)
                return;
 
+       /*
+        * USB 1.1 devices are mostly HIDs, which don't need to persist across
+        * suspends. If we ensure that none of our companion's devices have
+        * persist_enabled (by looking through all USB 1.1 buses in the system),
+        * we can skip this and avoid slowing resume down. Devices without
+        * persist will just get reenumerated shortly after resume anyway.
+        */
+       if (!usb_for_each_dev(NULL, persist_enabled_on_companion))
+               return;
+
        /* Make sure the ports are powered */
        port = HCS_N_PORTS(ehci->hcs_params);
        while (port--) {
@@ -876,7 +892,7 @@ static int ehci_hub_control (
                                                PORT_SUSPEND | PORT_RESUME);
                                ehci_writel(ehci, temp, status_reg);
                                clear_bit(wIndex, &ehci->resuming_ports);
-                               retval = handshake(ehci, status_reg,
+                               retval = ehci_handshake(ehci, status_reg,
                                           PORT_RESUME, 0, 2000 /* 2msec */);
                                if (retval != 0) {
                                        ehci_err(ehci,
@@ -902,7 +918,7 @@ static int ehci_hub_control (
                        /* REVISIT:  some hardware needs 550+ usec to clear
                         * this bit; seems too long to spin routinely...
                         */
-                       retval = handshake(ehci, status_reg,
+                       retval = ehci_handshake(ehci, status_reg,
                                        PORT_RESET, 0, 1000);
                        if (retval != 0) {
                                ehci_err (ehci, "port %d reset error %d\n",
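
[Note: the new persist_enabled_on_companion() callback above is passed to usb_for_each_dev(), which walks every registered USB device and stops as soon as the callback returns non-zero, propagating that value. A zero result therefore means no non-hub device behind a full- or low-speed (companion) root hub has persist enabled, so the companion-port handover, and the resume delay it causes, can be skipped. A sketch of the same calling convention with a hypothetical predicate; check_may_wakeup() and any_usb_device_may_wakeup() are illustrative only.]

	/* Returns true if any USB device in the system may wake it up.
	 * usb_for_each_dev() stops the walk at the first device for which
	 * the callback returns non-zero.
	 */
	static int check_may_wakeup(struct usb_device *udev, void *unused)
	{
		return device_may_wakeup(&udev->dev);
	}

	static bool any_usb_device_may_wakeup(void)
	{
		return usb_for_each_dev(NULL, check_may_wakeup) != 0;
	}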
index 402062973f032ca58e048a0d331ce526be4b8e0c..915c2db96dce8fdc3dded7b17915e501367f356e 100644 (file)
@@ -166,14 +166,14 @@ static int mv_ehci_probe(struct platform_device *pdev)
        if (IS_ERR(ehci_mv->clk)) {
                dev_err(&pdev->dev, "error getting clock\n");
                retval = PTR_ERR(ehci_mv->clk);
-               goto err_clear_drvdata;
+               goto err_put_hcd;
        }
 
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
        if (r == NULL) {
                dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
                retval = -ENODEV;
-               goto err_clear_drvdata;
+               goto err_put_hcd;
        }
 
        ehci_mv->phy_regs = devm_ioremap(&pdev->dev, r->start,
@@ -181,14 +181,14 @@ static int mv_ehci_probe(struct platform_device *pdev)
        if (ehci_mv->phy_regs == 0) {
                dev_err(&pdev->dev, "failed to map phy I/O memory\n");
                retval = -EFAULT;
-               goto err_clear_drvdata;
+               goto err_put_hcd;
        }
 
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
        if (!r) {
                dev_err(&pdev->dev, "no I/O memory resource defined\n");
                retval = -ENODEV;
-               goto err_clear_drvdata;
+               goto err_put_hcd;
        }
 
        ehci_mv->cap_regs = devm_ioremap(&pdev->dev, r->start,
@@ -196,13 +196,13 @@ static int mv_ehci_probe(struct platform_device *pdev)
        if (ehci_mv->cap_regs == NULL) {
                dev_err(&pdev->dev, "failed to map I/O memory\n");
                retval = -EFAULT;
-               goto err_clear_drvdata;
+               goto err_put_hcd;
        }
 
        retval = mv_ehci_enable(ehci_mv);
        if (retval) {
                dev_err(&pdev->dev, "init phy error %d\n", retval);
-               goto err_clear_drvdata;
+               goto err_put_hcd;
        }
 
        offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
@@ -274,8 +274,6 @@ err_set_vbus:
                pdata->set_vbus(0);
 err_disable_clk:
        mv_ehci_disable(ehci_mv);
-err_clear_drvdata:
-       platform_set_drvdata(pdev, NULL);
 err_put_hcd:
        usb_put_hcd(hcd);
 
@@ -300,8 +298,6 @@ static int mv_ehci_remove(struct platform_device *pdev)
                mv_ehci_disable(ehci_mv);
        }
 
-       platform_set_drvdata(pdev, NULL);
-
        usb_put_hcd(hcd);
 
        return 0;
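
[Note: this remove path, and several below, drop platform_set_drvdata(pdev, NULL); the driver core now clears the drvdata pointer itself once a device is unbound (and when probe fails), so resetting it by hand is redundant. A sketch of the resulting minimal remove(); example_ehci_remove() is hypothetical.]

	static int example_ehci_remove(struct platform_device *pdev)
	{
		struct usb_hcd *hcd = platform_get_drvdata(pdev);

		usb_remove_hcd(hcd);
		usb_put_hcd(hcd);
		/* no platform_set_drvdata(pdev, NULL) needed here */
		return 0;
	}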
index c369767b00e26de940eb6e8eca2e1ebe00b7e975..e4c34ac386c08936fcb532399fff1d5d39cbe1c3 100644 (file)
@@ -194,7 +194,6 @@ static int ehci_mxc_drv_remove(struct platform_device *pdev)
                clk_disable_unprepare(priv->phyclk);
 
        usb_put_hcd(hcd);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index a89750fff4ff09d38d9dc1090590c1efd68f8d24..45cc00158412ac8a380cda88a28a4bb7536d62fa 100644 (file)
@@ -182,8 +182,6 @@ static int ehci_octeon_drv_remove(struct platform_device *pdev)
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
        usb_put_hcd(hcd);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 16d7150e855722be6bc143c504d31120adf53f83..9bd7dfe3315bba47bfd220d06d484fcdd4700230 100644 (file)
@@ -187,6 +187,12 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
                }
 
                omap->phy[i] = phy;
+
+               if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) {
+                       usb_phy_init(omap->phy[i]);
+                       /* bring PHY out of suspend */
+                       usb_phy_set_suspend(omap->phy[i], 0);
+               }
        }
 
        pm_runtime_enable(dev);
@@ -211,13 +217,14 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        }
 
        /*
-        * Bring PHYs out of reset.
+        * Bring PHYs out of reset for non PHY modes.
         * Even though HSIC mode is a PHY-less mode, the reset
         * line exists between the chips and can be modelled
         * as a PHY device for reset control.
         */
        for (i = 0; i < omap->nports; i++) {
-               if (!omap->phy[i])
+               if (!omap->phy[i] ||
+                    pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY)
                        continue;
 
                usb_phy_init(omap->phy[i]);
@@ -294,7 +301,7 @@ static struct platform_driver ehci_hcd_omap_driver = {
        /*.resume               = ehci_hcd_omap_resume, */
        .driver = {
                .name           = hcd_name,
-               .of_match_table = of_match_ptr(omap_ehci_dt_ids),
+               .of_match_table = omap_ehci_dt_ids,
        }
 };
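
[Note: this hunk, and similar ones below, drop the of_match_ptr() wrapper. The wrapper only matters for drivers that must also build with CONFIG_OF disabled, where it compiles the table reference away to NULL; when the match table is defined unconditionally, referencing it directly is simpler. For reference, a sketch of the macro as defined in include/linux/of.h.]

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif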
 
index efbc588b48c50acbc2225c6be3f63ce1b1f35ffd..1a450aa13ebf04aaa3b8fef5ce9e296412b89373 100644 (file)
@@ -303,7 +303,7 @@ static struct platform_driver ehci_orion_driver = {
        .driver = {
                .name   = "orion-ehci",
                .owner  = THIS_MODULE,
-               .of_match_table = of_match_ptr(ehci_orion_dt_ids),
+               .of_match_table = ehci_orion_dt_ids,
        },
 };
 
index f47f2594c9d43f337719daaaf771c2ad7e5fd9d8..5733f8ed98f182c7bb0a579483989abef484cdc2 100644 (file)
@@ -146,7 +146,6 @@ static int ehci_platform_remove(struct platform_device *dev)
 
        usb_remove_hcd(hcd);
        usb_put_hcd(hcd);
-       platform_set_drvdata(dev, NULL);
 
        if (pdata->power_off)
                pdata->power_off(dev);
@@ -224,7 +223,7 @@ static struct platform_driver ehci_platform_driver = {
                .owner  = THIS_MODULE,
                .name   = "ehci-platform",
                .pm     = &ehci_platform_pm_ops,
-               .of_match_table = of_match_ptr(vt8500_ehci_ids),
+               .of_match_table = vt8500_ehci_ids,
        }
 };
 
index 363890ee41d2bcb640231b9d12b1d01b09a694b4..601e208bd782c07e9d0bb1b60d238ccbb7774758 100644 (file)
@@ -291,8 +291,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
        /*
         * basic lifecycle operations
         */
-       .reset =                ehci_msp_setup,
-       .start =                ehci_run,
+       .reset                  = ehci_msp_setup,
        .shutdown               = ehci_shutdown,
        .start                  = ehci_run,
        .stop                   = ehci_stop,
index 56dc732bf4511eef13af9af6a7b190df43795edc..86da09c0f8d0262479ecef2927ae36a6eb1f0516 100644 (file)
@@ -180,14 +180,12 @@ err_irq:
 
 static int ehci_hcd_ppc_of_remove(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 
        struct device_node *np;
        struct resource res;
 
-       dev_set_drvdata(&op->dev, NULL);
-
        dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
 
        usb_remove_hcd(hcd);
@@ -219,7 +217,7 @@ static int ehci_hcd_ppc_of_remove(struct platform_device *op)
 
 static void ehci_hcd_ppc_of_shutdown(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
        if (hcd->driver->shutdown)
                hcd->driver->shutdown(hcd);
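
[Note: switching from dev_get_drvdata(&op->dev) to platform_get_drvdata(op) here and in the xilinx-of hunks below is equivalent code; the platform helper is a one-line inline wrapper, so the change only keeps the driver at the platform_device level of abstraction. Sketch of the wrapper, paraphrased from include/linux/platform_device.h.]

	static inline void *platform_get_drvdata(const struct platform_device *pdev)
	{
		return dev_get_drvdata(&pdev->dev);
	}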
index 379037f51a2fc41b7e7aaaf112a759ce70615ac5..7cc26e621aa7c509da43e3712c9f0febc9fbfcdd 100644 (file)
@@ -50,6 +50,8 @@ struct s5p_ehci_hcd {
        struct s5p_ehci_platdata *pdata;
 };
 
+static struct s5p_ehci_platdata empty_platdata;
+
 #define to_s5p_ehci(hcd)      (struct s5p_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
 
 static void s5p_setup_vbus_gpio(struct platform_device *pdev)
@@ -101,6 +103,13 @@ static int s5p_ehci_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        s5p_ehci = to_s5p_ehci(hcd);
+
+       if (of_device_is_compatible(pdev->dev.of_node,
+                                       "samsung,exynos5440-ehci")) {
+               s5p_ehci->pdata = &empty_platdata;
+               goto skip_phy;
+       }
+
        phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
        if (IS_ERR(phy)) {
                /* Fallback to pdata */
@@ -116,6 +125,8 @@ static int s5p_ehci_probe(struct platform_device *pdev)
                s5p_ehci->otg = phy->otg;
        }
 
+skip_phy:
+
        s5p_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
 
        if (IS_ERR(s5p_ehci->clk)) {
@@ -277,6 +288,7 @@ static const struct dev_pm_ops s5p_ehci_pm_ops = {
 #ifdef CONFIG_OF
 static const struct of_device_id exynos_ehci_match[] = {
        { .compatible = "samsung,exynos4210-ehci" },
+       { .compatible = "samsung,exynos5440-ehci" },
        {},
 };
 MODULE_DEVICE_TABLE(of, exynos_ehci_match);
index f55477c5a1be005d27741e6f6720ac6365920008..b2de52d3961488f249aeb9d4026efe2d603b72bb 100644 (file)
@@ -140,7 +140,6 @@ static int ehci_hcd_sead3_drv_remove(struct platform_device *pdev)
 
        usb_remove_hcd(hcd);
        usb_put_hcd(hcd);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index b44d716ddc825e1b5905e0f4cbf481f3e176e83a..c4c0ee92a397b7172b9ecd04da481079ac272801 100644 (file)
@@ -176,7 +176,6 @@ static int ehci_hcd_sh_remove(struct platform_device *pdev)
 
        usb_remove_hcd(hcd);
        usb_put_hcd(hcd);
-       platform_set_drvdata(pdev, NULL);
 
        clk_disable(priv->fclk);
        clk_disable(priv->iclk);
index bd3e5cbc6240316dc5947f6f38dbc6a6c7492c51..1cf0adba3fc8dd4b3e071e8eeef769c5e9fbf1a2 100644 (file)
@@ -148,10 +148,6 @@ static int spear_ehci_hcd_drv_remove(struct platform_device *pdev)
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
        struct spear_ehci *sehci = to_spear_ehci(hcd);
 
-       if (!hcd)
-               return 0;
-       if (in_interrupt())
-               BUG();
        usb_remove_hcd(hcd);
 
        if (sehci->clk)
@@ -174,7 +170,7 @@ static struct platform_driver spear_ehci_hcd_driver = {
                .name = "spear-ehci",
                .bus = &platform_bus_type,
                .pm = &ehci_spear_pm_ops,
-               .of_match_table = of_match_ptr(spear_ehci_id_table),
+               .of_match_table = spear_ehci_id_table,
        }
 };
 
index 59d111bf44a9d961b45fc85b99d659a8a67230b4..6ee7ef79b4f86a5c802e33245592d40ec27127a5 100644 (file)
  */
 
 #include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/tegra_usb.h>
-#include <linux/irq.h>
-#include <linux/usb/otg.h>
 #include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tegra_usb.h>
 #include <linux/pm_runtime.h>
+#include <linux/slab.h>
 #include <linux/usb/ehci_def.h>
 #include <linux/usb/tegra_usb_phy.h>
-#include <linux/clk/tegra.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include "ehci.h"
 
 #define TEGRA_USB_BASE                 0xC5000000
 #define TEGRA_USB2_BASE                        0xC5004000
 #define TEGRA_USB3_BASE                        0xC5008000
 
-/* PORTSC registers */
-#define TEGRA_USB_PORTSC1                      0x184
-#define TEGRA_USB_PORTSC1_PTS(x)       (((x) & 0x3) << 30)
-#define TEGRA_USB_PORTSC1_PHCD (1 << 23)
+#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
 
 #define TEGRA_USB_DMA_ALIGN 32
 
+#define DRIVER_DESC "Tegra EHCI driver"
+#define DRV_NAME "tegra-ehci"
+
+static struct hc_driver __read_mostly tegra_ehci_hc_driver;
+
+static int (*orig_hub_control)(struct usb_hcd *hcd,
+                               u16 typeReq, u16 wValue, u16 wIndex,
+                               char *buf, u16 wLength);
+
 struct tegra_ehci_hcd {
-       struct ehci_hcd *ehci;
        struct tegra_usb_phy *phy;
        struct clk *clk;
        struct usb_phy *transceiver;
-       int host_resumed;
        int port_resuming;
        bool needs_double_reset;
        enum tegra_usb_phy_port_speed port_speed;
 };
 
-static void tegra_ehci_power_up(struct usb_hcd *hcd)
-{
-       struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
-
-       clk_prepare_enable(tegra->clk);
-       usb_phy_set_suspend(hcd->phy, 0);
-       tegra->host_resumed = 1;
-}
-
-static void tegra_ehci_power_down(struct usb_hcd *hcd)
-{
-       struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
-
-       tegra->host_resumed = 0;
-       usb_phy_set_suspend(hcd->phy, 1);
-       clk_disable_unprepare(tegra->clk);
-}
-
 static int tegra_ehci_internal_port_reset(
        struct ehci_hcd *ehci,
        u32 __iomem     *portsc_reg
@@ -144,8 +138,8 @@ static int tegra_ehci_hub_control(
        u16             wLength
 )
 {
-       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-       struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+       struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd *)ehci->priv;
        u32 __iomem     *status_reg;
        u32             temp;
        unsigned long   flags;
@@ -179,7 +173,7 @@ static int tegra_ehci_hub_control(
                 * If a transaction is in progress, there may be a delay in
                 * suspending the port. Poll until the port is suspended.
                 */
-               if (handshake(ehci, status_reg, PORT_SUSPEND,
+               if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
                                                PORT_SUSPEND, 5000))
                        pr_err("%s: timeout waiting for SUSPEND\n", __func__);
 
@@ -227,9 +221,9 @@ static int tegra_ehci_hub_control(
                spin_lock_irqsave(&ehci->lock, flags);
 
                /* Poll until the controller clears RESUME and SUSPEND */
-               if (handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
+               if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
                        pr_err("%s: timeout waiting for RESUME\n", __func__);
-               if (handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
+               if (ehci_handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
                        pr_err("%s: timeout waiting for SUSPEND\n", __func__);
 
                ehci->reset_done[wIndex-1] = 0;
@@ -242,58 +236,13 @@ static int tegra_ehci_hub_control(
        spin_unlock_irqrestore(&ehci->lock, flags);
 
        /* Handle the hub control events here */
-       return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+       return orig_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+
 done:
        spin_unlock_irqrestore(&ehci->lock, flags);
        return retval;
 }
 
-static void tegra_ehci_restart(struct usb_hcd *hcd)
-{
-       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
-       ehci_reset(ehci);
-
-       /* setup the frame list and Async q heads */
-       ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
-       ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
-       /* setup the command register and set the controller in RUN mode */
-       ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
-       ehci->command |= CMD_RUN;
-       ehci_writel(ehci, ehci->command, &ehci->regs->command);
-
-       down_write(&ehci_cf_port_reset_rwsem);
-       ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
-       /* flush posted writes */
-       ehci_readl(ehci, &ehci->regs->command);
-       up_write(&ehci_cf_port_reset_rwsem);
-}
-
-static void tegra_ehci_shutdown(struct usb_hcd *hcd)
-{
-       struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
-
-       /* ehci_shutdown touches the USB controller registers, make sure
-        * controller has clocks to it */
-       if (!tegra->host_resumed)
-               tegra_ehci_power_up(hcd);
-
-       ehci_shutdown(hcd);
-}
-
-static int tegra_ehci_setup(struct usb_hcd *hcd)
-{
-       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
-       /* EHCI registers start at offset 0x100 */
-       ehci->caps = hcd->regs + 0x100;
-
-       /* switch to host mode */
-       hcd->has_tt = 1;
-
-       return ehci_setup(hcd);
-}
-
 struct dma_aligned_buffer {
        void *kmalloc_ptr;
        void *old_xfer_buffer;
@@ -373,38 +322,6 @@ static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
        free_dma_aligned_buffer(urb);
 }
 
-static const struct hc_driver tegra_ehci_hc_driver = {
-       .description            = hcd_name,
-       .product_desc           = "Tegra EHCI Host Controller",
-       .hcd_priv_size          = sizeof(struct ehci_hcd),
-       .flags                  = HCD_USB2 | HCD_MEMORY,
-
-       /* standard ehci functions */
-       .irq                    = ehci_irq,
-       .start                  = ehci_run,
-       .stop                   = ehci_stop,
-       .urb_enqueue            = ehci_urb_enqueue,
-       .urb_dequeue            = ehci_urb_dequeue,
-       .endpoint_disable       = ehci_endpoint_disable,
-       .endpoint_reset         = ehci_endpoint_reset,
-       .get_frame_number       = ehci_get_frame,
-       .hub_status_data        = ehci_hub_status_data,
-       .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-       .relinquish_port        = ehci_relinquish_port,
-       .port_handed_over       = ehci_port_handed_over,
-
-       /* modified ehci functions for tegra */
-       .reset                  = tegra_ehci_setup,
-       .shutdown               = tegra_ehci_shutdown,
-       .map_urb_for_dma        = tegra_ehci_map_urb_for_dma,
-       .unmap_urb_for_dma      = tegra_ehci_unmap_urb_for_dma,
-       .hub_control            = tegra_ehci_hub_control,
-#ifdef CONFIG_PM
-       .bus_suspend            = ehci_bus_suspend,
-       .bus_resume             = ehci_bus_resume,
-#endif
-};
-
 static int setup_vbus_gpio(struct platform_device *pdev,
                           struct tegra_ehci_platform_data *pdata)
 {
@@ -432,220 +349,16 @@ static int setup_vbus_gpio(struct platform_device *pdev,
        return err;
 }
 
-#ifdef CONFIG_PM
-
-static int controller_suspend(struct device *dev)
-{
-       struct tegra_ehci_hcd *tegra =
-                       platform_get_drvdata(to_platform_device(dev));
-       struct ehci_hcd *ehci = tegra->ehci;
-       struct usb_hcd *hcd = ehci_to_hcd(ehci);
-       struct ehci_regs __iomem *hw = ehci->regs;
-       unsigned long flags;
-
-       if (time_before(jiffies, ehci->next_statechange))
-               msleep(10);
-
-       ehci_halt(ehci);
-
-       spin_lock_irqsave(&ehci->lock, flags);
-       tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
-       clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-       spin_unlock_irqrestore(&ehci->lock, flags);
-
-       tegra_ehci_power_down(hcd);
-       return 0;
-}
-
-static int controller_resume(struct device *dev)
-{
-       struct tegra_ehci_hcd *tegra =
-                       platform_get_drvdata(to_platform_device(dev));
-       struct ehci_hcd *ehci = tegra->ehci;
-       struct usb_hcd *hcd = ehci_to_hcd(ehci);
-       struct ehci_regs __iomem *hw = ehci->regs;
-       unsigned long val;
-
-       set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-       tegra_ehci_power_up(hcd);
-
-       if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
-               /* Wait for the phy to detect new devices
-                * before we restart the controller */
-               msleep(10);
-               goto restart;
-       }
-
-       /* Force the phy to keep data lines in suspend state */
-       tegra_ehci_phy_restore_start(hcd->phy, tegra->port_speed);
-
-       /* Enable host mode */
-       tdi_reset(ehci);
-
-       /* Enable Port Power */
-       val = readl(&hw->port_status[0]);
-       val |= PORT_POWER;
-       writel(val, &hw->port_status[0]);
-       udelay(10);
-
-       /* Check if the phy resume from LP0. When the phy resume from LP0
-        * USB register will be reset. */
-       if (!readl(&hw->async_next)) {
-               /* Program the field PTC based on the saved speed mode */
-               val = readl(&hw->port_status[0]);
-               val &= ~PORT_TEST(~0);
-               if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
-                       val |= PORT_TEST_FORCE;
-               else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
-                       val |= PORT_TEST(6);
-               else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
-                       val |= PORT_TEST(7);
-               writel(val, &hw->port_status[0]);
-               udelay(10);
-
-               /* Disable test mode by setting PTC field to NORMAL_OP */
-               val = readl(&hw->port_status[0]);
-               val &= ~PORT_TEST(~0);
-               writel(val, &hw->port_status[0]);
-               udelay(10);
-       }
-
-       /* Poll until CCS is enabled */
-       if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
-                                                PORT_CONNECT, 2000)) {
-               pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
-               goto restart;
-       }
-
-       /* Poll until PE is enabled */
-       if (handshake(ehci, &hw->port_status[0], PORT_PE,
-                                                PORT_PE, 2000)) {
-               pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
-               goto restart;
-       }
-
-       /* Clear the PCI status, to avoid an interrupt taken upon resume */
-       val = readl(&hw->status);
-       val |= STS_PCD;
-       writel(val, &hw->status);
-
-       /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
-       val = readl(&hw->port_status[0]);
-       if ((val & PORT_POWER) && (val & PORT_PE)) {
-               val |= PORT_SUSPEND;
-               writel(val, &hw->port_status[0]);
-
-               /* Wait until port suspend completes */
-               if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
-                                                        PORT_SUSPEND, 1000)) {
-                       pr_err("%s: timeout waiting for PORT_SUSPEND\n",
-                                                               __func__);
-                       goto restart;
-               }
-       }
-
-       tegra_ehci_phy_restore_end(hcd->phy);
-       goto done;
-
- restart:
-       if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
-               tegra_ehci_phy_restore_end(hcd->phy);
-
-       tegra_ehci_restart(hcd);
-
- done:
-       tegra_usb_phy_preresume(hcd->phy);
-       tegra->port_resuming = 1;
-       return 0;
-}
-
-static int tegra_ehci_suspend(struct device *dev)
-{
-       struct tegra_ehci_hcd *tegra =
-                       platform_get_drvdata(to_platform_device(dev));
-       struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
-       int rc = 0;
-
-       /*
-        * When system sleep is supported and USB controller wakeup is
-        * implemented: If the controller is runtime-suspended and the
-        * wakeup setting needs to be changed, call pm_runtime_resume().
-        */
-       if (HCD_HW_ACCESSIBLE(hcd))
-               rc = controller_suspend(dev);
-       return rc;
-}
-
-static int tegra_ehci_resume(struct device *dev)
-{
-       int rc;
-
-       rc = controller_resume(dev);
-       if (rc == 0) {
-               pm_runtime_disable(dev);
-               pm_runtime_set_active(dev);
-               pm_runtime_enable(dev);
-       }
-       return rc;
-}
-
-static int tegra_ehci_runtime_suspend(struct device *dev)
-{
-       return controller_suspend(dev);
-}
-
-static int tegra_ehci_runtime_resume(struct device *dev)
-{
-       return controller_resume(dev);
-}
-
-static const struct dev_pm_ops tegra_ehci_pm_ops = {
-       .suspend        = tegra_ehci_suspend,
-       .resume         = tegra_ehci_resume,
-       .runtime_suspend = tegra_ehci_runtime_suspend,
-       .runtime_resume = tegra_ehci_runtime_resume,
-};
-
-#endif
-
-/* Bits of PORTSC1, which will get cleared by writing 1 into them */
-#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
-
-static void tegra_ehci_set_pts(struct usb_phy *x, u8 pts_val)
-{
-       unsigned long val;
-       struct usb_hcd *hcd = bus_to_hcd(x->otg->host);
-       void __iomem *base = hcd->regs;
-
-       val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
-       val &= ~TEGRA_USB_PORTSC1_PTS(3);
-       val |= TEGRA_USB_PORTSC1_PTS(pts_val & 3);
-       writel(val, base + TEGRA_USB_PORTSC1);
-}
-
-static void tegra_ehci_set_phcd(struct usb_phy *x, bool enable)
-{
-       unsigned long val;
-       struct usb_hcd *hcd = bus_to_hcd(x->otg->host);
-       void __iomem *base = hcd->regs;
-
-       val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
-       if (enable)
-               val |= TEGRA_USB_PORTSC1_PHCD;
-       else
-               val &= ~TEGRA_USB_PORTSC1_PHCD;
-       writel(val, base + TEGRA_USB_PORTSC1);
-}
-
 static int tegra_ehci_probe(struct platform_device *pdev)
 {
        struct resource *res;
        struct usb_hcd *hcd;
+       struct ehci_hcd *ehci;
        struct tegra_ehci_hcd *tegra;
        struct tegra_ehci_platform_data *pdata;
        int err = 0;
        int irq;
-       int instance = pdev->id;
+       struct device_node *np_phy;
        struct usb_phy *u_phy;
 
        pdata = pdev->dev.platform_data;
@@ -665,35 +378,47 @@ static int tegra_ehci_probe(struct platform_device *pdev)
 
        setup_vbus_gpio(pdev, pdata);
 
-       tegra = devm_kzalloc(&pdev->dev, sizeof(struct tegra_ehci_hcd),
-                            GFP_KERNEL);
-       if (!tegra)
-               return -ENOMEM;
-
        hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
                                        dev_name(&pdev->dev));
        if (!hcd) {
                dev_err(&pdev->dev, "Unable to create HCD\n");
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto cleanup_vbus_gpio;
        }
+       platform_set_drvdata(pdev, hcd);
+       ehci = hcd_to_ehci(hcd);
+       tegra = (struct tegra_ehci_hcd *)ehci->priv;
 
-       platform_set_drvdata(pdev, tegra);
+       hcd->has_tt = 1;
 
        tegra->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(tegra->clk)) {
                dev_err(&pdev->dev, "Can't get ehci clock\n");
                err = PTR_ERR(tegra->clk);
-               goto fail_clk;
+               goto cleanup_hcd_create;
        }
 
        err = clk_prepare_enable(tegra->clk);
        if (err)
-               goto fail_clk;
+               goto cleanup_clk_get;
 
        tegra_periph_reset_assert(tegra->clk);
        udelay(1);
        tegra_periph_reset_deassert(tegra->clk);
 
+       np_phy = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
+       if (!np_phy) {
+               err = -ENODEV;
+               goto cleanup_clk_en;
+       }
+
+       u_phy = tegra_usb_get_phy(np_phy);
+       if (IS_ERR(u_phy)) {
+               err = PTR_ERR(u_phy);
+               goto cleanup_clk_en;
+       }
+       hcd->phy = u_phy;
+
        tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node,
                "nvidia,needs-double-reset");
 
@@ -701,7 +426,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
        if (!res) {
                dev_err(&pdev->dev, "Failed to get I/O memory\n");
                err = -ENXIO;
-               goto fail_io;
+               goto cleanup_clk_en;
        }
        hcd->rsrc_start = res->start;
        hcd->rsrc_len = resource_size(res);
@@ -709,68 +434,36 @@ static int tegra_ehci_probe(struct platform_device *pdev)
        if (!hcd->regs) {
                dev_err(&pdev->dev, "Failed to remap I/O memory\n");
                err = -ENOMEM;
-               goto fail_io;
-       }
-
-       /* This is pretty ugly and needs to be fixed when we do only
-        * device-tree probing. Old code relies on the platform_device
-        * numbering that we lack for device-tree-instantiated devices.
-        */
-       if (instance < 0) {
-               switch (res->start) {
-               case TEGRA_USB_BASE:
-                       instance = 0;
-                       break;
-               case TEGRA_USB2_BASE:
-                       instance = 1;
-                       break;
-               case TEGRA_USB3_BASE:
-                       instance = 2;
-                       break;
-               default:
-                       err = -ENODEV;
-                       dev_err(&pdev->dev, "unknown usb instance\n");
-                       goto fail_io;
-               }
+               goto cleanup_clk_en;
        }
+       ehci->caps = hcd->regs + 0x100;
 
-       tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs,
-                                       pdata->phy_config,
-                                       TEGRA_USB_PHY_MODE_HOST,
-                                       tegra_ehci_set_pts,
-                                       tegra_ehci_set_phcd);
-       if (IS_ERR(tegra->phy)) {
-               dev_err(&pdev->dev, "Failed to open USB phy\n");
-               err = -ENXIO;
-               goto fail_io;
+       err = usb_phy_init(hcd->phy);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize phy\n");
+               goto cleanup_clk_en;
        }
 
-       hcd->phy = u_phy = &tegra->phy->u_phy;
-       usb_phy_init(hcd->phy);
-
        u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
                             GFP_KERNEL);
        if (!u_phy->otg) {
                dev_err(&pdev->dev, "Failed to alloc memory for otg\n");
                err = -ENOMEM;
-               goto fail_io;
+               goto cleanup_phy;
        }
        u_phy->otg->host = hcd_to_bus(hcd);
 
        err = usb_phy_set_suspend(hcd->phy, 0);
        if (err) {
                dev_err(&pdev->dev, "Failed to power on the phy\n");
-               goto fail_phy;
+               goto cleanup_phy;
        }
 
-       tegra->host_resumed = 1;
-       tegra->ehci = hcd_to_ehci(hcd);
-
        irq = platform_get_irq(pdev, 0);
        if (!irq) {
                dev_err(&pdev->dev, "Failed to get IRQ\n");
                err = -ENODEV;
-               goto fail_phy;
+               goto cleanup_phy;
        }
 
        if (pdata->operating_mode == TEGRA_USB_OTG) {
@@ -785,39 +478,32 @@ static int tegra_ehci_probe(struct platform_device *pdev)
        err = usb_add_hcd(hcd, irq, IRQF_SHARED);
        if (err) {
                dev_err(&pdev->dev, "Failed to add USB HCD\n");
-               goto fail;
+               goto cleanup_transceiver;
        }
 
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_get_noresume(&pdev->dev);
-
-       /* Don't skip the pm_runtime_forbid call if wakeup isn't working */
-       /* if (!pdata->power_down_on_bus_suspend) */
-               pm_runtime_forbid(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_put_sync(&pdev->dev);
        return err;
 
-fail:
+cleanup_transceiver:
        if (!IS_ERR(tegra->transceiver))
                otg_set_host(tegra->transceiver->otg, NULL);
-fail_phy:
+cleanup_phy:
        usb_phy_shutdown(hcd->phy);
-fail_io:
+cleanup_clk_en:
        clk_disable_unprepare(tegra->clk);
-fail_clk:
+cleanup_clk_get:
+       clk_put(tegra->clk);
+cleanup_hcd_create:
        usb_put_hcd(hcd);
+cleanup_vbus_gpio:
+       /* FIXME: Undo setup_vbus_gpio() here */
        return err;
 }
 
 static int tegra_ehci_remove(struct platform_device *pdev)
 {
-       struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
-       struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
-
-       pm_runtime_get_sync(&pdev->dev);
-       pm_runtime_disable(&pdev->dev);
-       pm_runtime_put_noidle(&pdev->dev);
+       struct usb_hcd *hcd = platform_get_drvdata(pdev);
+       struct tegra_ehci_hcd *tegra =
+               (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
 
        if (!IS_ERR(tegra->transceiver))
                otg_set_host(tegra->transceiver->otg, NULL);
@@ -833,8 +519,7 @@ static int tegra_ehci_remove(struct platform_device *pdev)
 
 static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
 {
-       struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
-       struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+       struct usb_hcd *hcd = platform_get_drvdata(pdev);
 
        if (hcd->driver->shutdown)
                hcd->driver->shutdown(hcd);
@@ -850,10 +535,50 @@ static struct platform_driver tegra_ehci_driver = {
        .remove         = tegra_ehci_remove,
        .shutdown       = tegra_ehci_hcd_shutdown,
        .driver         = {
-               .name   = "tegra-ehci",
+               .name   = DRV_NAME,
                .of_match_table = tegra_ehci_of_match,
-#ifdef CONFIG_PM
-               .pm     = &tegra_ehci_pm_ops,
-#endif
        }
 };
+
+static const struct ehci_driver_overrides tegra_overrides __initconst = {
+       .extra_priv_size        = sizeof(struct tegra_ehci_hcd),
+};
+
+static int __init ehci_tegra_init(void)
+{
+       if (usb_disabled())
+               return -ENODEV;
+
+       pr_info(DRV_NAME ": " DRIVER_DESC "\n");
+
+       ehci_init_driver(&tegra_ehci_hc_driver, &tegra_overrides);
+
+       /*
+        * The Tegra HW has some unusual quirks, which require Tegra-specific
+        * workarounds. We override certain hc_driver functions here to
+        * achieve that. We explicitly do not enhance ehci_driver_overrides to
+        * allow this more easily, since this is an unusual case, and we don't
+        * want to encourage others to override these functions by making it
+        * too easy.
+        */
+
+       orig_hub_control = tegra_ehci_hc_driver.hub_control;
+
+       tegra_ehci_hc_driver.map_urb_for_dma = tegra_ehci_map_urb_for_dma;
+       tegra_ehci_hc_driver.unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma;
+       tegra_ehci_hc_driver.hub_control = tegra_ehci_hub_control;
+
+       return platform_driver_register(&tegra_ehci_driver);
+}
+module_init(ehci_tegra_init);
+
+static void __exit ehci_tegra_cleanup(void)
+{
+       platform_driver_unregister(&tegra_ehci_driver);
+}
+module_exit(ehci_tegra_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, tegra_ehci_of_match);
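
[Note: the rewritten Tegra glue now layers on the generic ehci-hcd core: ehci_init_driver() copies the common hc_driver template into tegra_ehci_hc_driver and applies the ehci_driver_overrides (here just extra_priv_size, which reserves room for struct tegra_ehci_hcd behind ehci->priv); the init function then patches in the few Tegra-specific hooks and chains to the saved original hub_control. A sketch of the same pattern for a hypothetical "foo" glue driver, assuming only the ehci.h interfaces shown in this series.]

	struct foo_ehci_priv {
		struct clk *clk;		/* whatever the SoC glue needs */
	};

	static struct hc_driver __read_mostly foo_ehci_hc_driver;

	static const struct ehci_driver_overrides foo_overrides __initconst = {
		.extra_priv_size = sizeof(struct foo_ehci_priv),
	};

	static int __init foo_ehci_init(void)
	{
		if (usb_disabled())
			return -ENODEV;

		/* fill in the generic EHCI callbacks, reserving ehci->priv space */
		ehci_init_driver(&foo_ehci_hc_driver, &foo_overrides);

		/* override only the hooks the SoC needs, e.g.:
		 * foo_ehci_hc_driver.hub_control = foo_ehci_hub_control;
		 * then register the platform_driver, as the Tegra code does.
		 */
		return 0;
	}

[In probe, the private area is then reached as (struct foo_ehci_priv *)hcd_to_ehci(hcd)->priv, mirroring the Tegra code above.]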
index b083a350eea35f94ddf15903643ecc2351a8ac1c..d72b2929c03db50e103ee3f481694001fddae3c1 100644 (file)
@@ -193,7 +193,6 @@ static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev)
        tilegx_stop_ehc();
        gxio_usb_host_destroy(&pdata->usb_ctx);
        destroy_irq(pdata->irq);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index d845e3bcfaffb14a1015f6a9174863228a1929f5..35c7f90384a6f03d947dee5576a5ba41dfd3c004 100644 (file)
@@ -209,8 +209,7 @@ err_irq:
  */
 static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
-       dev_set_drvdata(&op->dev, NULL);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
        dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");
 
@@ -229,7 +228,7 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
  */
 static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
        if (hcd->driver->shutdown)
                hcd->driver->shutdown(hcd);
index 7c978b23520d07a871396db9626293fd352944ca..64f9a08e959c73db9359112b1ae6bb889e0b29c4 100644 (file)
@@ -800,6 +800,8 @@ struct ehci_driver_overrides {
 extern void    ehci_init_driver(struct hc_driver *drv,
                                const struct ehci_driver_overrides *over);
 extern int     ehci_setup(struct usb_hcd *hcd);
+extern int     ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
+                               u32 mask, u32 done, int usec);
 
 #ifdef CONFIG_PM
 extern int     ehci_suspend(struct usb_hcd *hcd, bool do_wakeup);
index 8f18538e0ff752ceef59e44381a5a00bc2e9ce6b..95ca5986e672dc90cfa97e685e2bdb6fce04a3a6 100644 (file)
@@ -739,9 +739,13 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
        }
 
        /* for ISO transfer calculate start frame index */
-       if (ed->mode == FHCI_TF_ISO && urb->transfer_flags & URB_ISO_ASAP)
-               urb->start_frame = ed->td_head ? ed->last_iso + 1 :
+       if (ed->mode == FHCI_TF_ISO) {
+               /* Ignore the possibility of underruns */
+               urb->start_frame = ed->td_head ? ed->next_iso :
                                                 get_frame_num(fhci);
+               ed->next_iso = (urb->start_frame + urb->interval *
+                               urb->number_of_packets) & 0x07ff;
+       }
 
        /*
         * OHCI handles the DATA toggle itself,we just use the USB
index 7cc1c32dc36cdf2c2a05aa4a4a43aa875bb8a9dc..154e6a007727159498a3a5278aa51de69d9e34f0 100644 (file)
@@ -338,7 +338,7 @@ struct ed {
 
        /* read only parameters, should be cleared upon initialization */
        u8 toggle_carry;        /* toggle carry from the last TD submitted */
-       u32 last_iso;           /* time stamp of last queued ISO transfer */
+       u16 next_iso;           /* time stamp of next queued ISO transfer */
        struct td *td_head;     /* a pointer to the current TD handled */
 };
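
[Note: the fhci change above stops special-casing URB_ISO_ASAP; the driver now always computes the next ISO start frame itself and stores it in the new 16-bit next_iso field. The mask 0x07ff keeps the value within the 11-bit USB frame-number space (0..2047), so the schedule wraps instead of running past the counter. For example, with start_frame 2040, interval 4 and 4 packets per URB, the next start frame is (2040 + 4 * 4) & 0x07ff = 8.]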
 
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
new file mode 100644 (file)
index 0000000..299253c
--- /dev/null
@@ -0,0 +1,5972 @@
+/*
+ * Faraday FUSBH200 EHCI-like driver
+ *
+ * Copyright (c) 2013 Faraday Technology Corporation
+ *
+ * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ *        Feng-Hsin Chiang <john453@faraday-tech.com>
+ *        Po-Yu Chuang <ratbert.chuang@gmail.com>
+ *
+ * Most of code borrowed from the Linux-3.7 EHCI driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+/*-------------------------------------------------------------------------*/
+#define DRIVER_AUTHOR "Yuan-Hsin Chen"
+#define DRIVER_DESC "FUSBH200 Host Controller (EHCI) Driver"
+
+static const char      hcd_name [] = "fusbh200_hcd";
+
+#undef VERBOSE_DEBUG
+#undef FUSBH200_URB_TRACE
+
+#ifdef DEBUG
+#define FUSBH200_STATS
+#endif
+
+/* magic numbers that can affect system performance */
+#define        FUSBH200_TUNE_CERR              3       /* 0-3 qtd retries; 0 == don't stop */
+#define        FUSBH200_TUNE_RL_HS             4       /* nak throttle; see 4.9 */
+#define        FUSBH200_TUNE_RL_TT             0
+#define        FUSBH200_TUNE_MULT_HS   1       /* 1-3 transactions/uframe; 4.10.3 */
+#define        FUSBH200_TUNE_MULT_TT   1
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code).  In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define        FUSBH200_TUNE_FLS               1       /* (medium) 512-frame schedule */
+
+/* Initial IRQ latency:  faster than hw default */
+static int log2_irq_thresh = 0;                // 0 to 6
+module_param (log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting:  slower than hw default */
+static unsigned park = 0;
+module_param (park, uint, S_IRUGO);
+MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
+
+/* for link power management(LPM) feature */
+static unsigned int hird;
+module_param(hird, int, S_IRUGO);
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
+
+#define        INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+#include "fusbh200.h"
+
+/*-------------------------------------------------------------------------*/
+
+#define fusbh200_dbg(fusbh200, fmt, args...) \
+       dev_dbg (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+#define fusbh200_err(fusbh200, fmt, args...) \
+       dev_err (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+#define fusbh200_info(fusbh200, fmt, args...) \
+       dev_info (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+#define fusbh200_warn(fusbh200, fmt, args...) \
+       dev_warn (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
+
+#ifdef VERBOSE_DEBUG
+#      define fusbh200_vdbg fusbh200_dbg
+#else
+       static inline void fusbh200_vdbg(struct fusbh200_hcd *fusbh200, ...) {}
+#endif
+
+#ifdef DEBUG
+
+/* check the values in the HCSPARAMS register
+ * (host controller _Structural_ parameters)
+ * see EHCI spec, Table 2-4 for each value
+ */
+static void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label)
+{
+       u32     params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
+
+       fusbh200_dbg (fusbh200,
+               "%s hcs_params 0x%x ports=%d\n",
+               label, params,
+               HCS_N_PORTS (params)
+               );
+}
+#else
+
+static inline void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label) {}
+
+#endif
+
+#ifdef DEBUG
+
+/* check the values in the HCCPARAMS register
+ * (host controller _Capability_ parameters)
+ * see EHCI Spec, Table 2-5 for each value
+ * */
+static void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label)
+{
+       u32     params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+
+       fusbh200_dbg (fusbh200,
+               "%s hcc_params %04x uframes %s%s\n",
+               label,
+               params,
+               HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+               HCC_CANPARK(params) ? " park" : "");
+}
+#else
+
+static inline void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label) {}
+
+#endif
+
+#ifdef DEBUG
+
+static void __maybe_unused
+dbg_qtd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
+{
+       fusbh200_dbg(fusbh200, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+               hc32_to_cpup(fusbh200, &qtd->hw_next),
+               hc32_to_cpup(fusbh200, &qtd->hw_alt_next),
+               hc32_to_cpup(fusbh200, &qtd->hw_token),
+               hc32_to_cpup(fusbh200, &qtd->hw_buf [0]));
+       if (qtd->hw_buf [1])
+               fusbh200_dbg(fusbh200, "  p1=%08x p2=%08x p3=%08x p4=%08x\n",
+                       hc32_to_cpup(fusbh200, &qtd->hw_buf[1]),
+                       hc32_to_cpup(fusbh200, &qtd->hw_buf[2]),
+                       hc32_to_cpup(fusbh200, &qtd->hw_buf[3]),
+                       hc32_to_cpup(fusbh200, &qtd->hw_buf[4]));
+}
+
+static void __maybe_unused
+dbg_qh (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       struct fusbh200_qh_hw *hw = qh->hw;
+
+       fusbh200_dbg (fusbh200, "%s qh %p n%08x info %x %x qtd %x\n", label,
+               qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+       dbg_qtd("overlay", fusbh200, (struct fusbh200_qtd *) &hw->hw_qtd_next);
+}
+
+static void __maybe_unused
+dbg_itd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
+{
+       fusbh200_dbg (fusbh200, "%s [%d] itd %p, next %08x, urb %p\n",
+               label, itd->frame, itd, hc32_to_cpu(fusbh200, itd->hw_next),
+               itd->urb);
+       fusbh200_dbg (fusbh200,
+               "  trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+               hc32_to_cpu(fusbh200, itd->hw_transaction[0]),
+               hc32_to_cpu(fusbh200, itd->hw_transaction[1]),
+               hc32_to_cpu(fusbh200, itd->hw_transaction[2]),
+               hc32_to_cpu(fusbh200, itd->hw_transaction[3]),
+               hc32_to_cpu(fusbh200, itd->hw_transaction[4]),
+               hc32_to_cpu(fusbh200, itd->hw_transaction[5]),
+               hc32_to_cpu(fusbh200, itd->hw_transaction[6]),
+               hc32_to_cpu(fusbh200, itd->hw_transaction[7]));
+       fusbh200_dbg (fusbh200,
+               "  buf:   %08x %08x %08x %08x %08x %08x %08x\n",
+               hc32_to_cpu(fusbh200, itd->hw_bufp[0]),
+               hc32_to_cpu(fusbh200, itd->hw_bufp[1]),
+               hc32_to_cpu(fusbh200, itd->hw_bufp[2]),
+               hc32_to_cpu(fusbh200, itd->hw_bufp[3]),
+               hc32_to_cpu(fusbh200, itd->hw_bufp[4]),
+               hc32_to_cpu(fusbh200, itd->hw_bufp[5]),
+               hc32_to_cpu(fusbh200, itd->hw_bufp[6]));
+       fusbh200_dbg (fusbh200, "  index: %d %d %d %d %d %d %d %d\n",
+               itd->index[0], itd->index[1], itd->index[2],
+               itd->index[3], itd->index[4], itd->index[5],
+               itd->index[6], itd->index[7]);
+}
+
+static int __maybe_unused
+dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
+{
+       return scnprintf (buf, len,
+               "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+               label, label [0] ? " " : "", status,
+               (status & STS_ASS) ? " Async" : "",
+               (status & STS_PSS) ? " Periodic" : "",
+               (status & STS_RECL) ? " Recl" : "",
+               (status & STS_HALT) ? " Halt" : "",
+               (status & STS_IAA) ? " IAA" : "",
+               (status & STS_FATAL) ? " FATAL" : "",
+               (status & STS_FLR) ? " FLR" : "",
+               (status & STS_PCD) ? " PCD" : "",
+               (status & STS_ERR) ? " ERR" : "",
+               (status & STS_INT) ? " INT" : ""
+               );
+}
+
+static int __maybe_unused
+dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
+{
+       return scnprintf (buf, len,
+               "%s%sintrenable %02x%s%s%s%s%s%s",
+               label, label [0] ? " " : "", enable,
+               (enable & STS_IAA) ? " IAA" : "",
+               (enable & STS_FATAL) ? " FATAL" : "",
+               (enable & STS_FLR) ? " FLR" : "",
+               (enable & STS_PCD) ? " PCD" : "",
+               (enable & STS_ERR) ? " ERR" : "",
+               (enable & STS_INT) ? " INT" : ""
+               );
+}
+
+static const char *const fls_strings [] =
+    { "1024", "512", "256", "??" };
+
+static int
+dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
+{
+       return scnprintf (buf, len,
+               "%s%scommand %07x %s=%d ithresh=%d%s%s%s "
+               "period=%s%s %s",
+               label, label [0] ? " " : "", command,
+               (command & CMD_PARK) ? " park" : "(park)",
+               CMD_PARK_CNT (command),
+               (command >> 16) & 0x3f,
+               (command & CMD_IAAD) ? " IAAD" : "",
+               (command & CMD_ASE) ? " Async" : "",
+               (command & CMD_PSE) ? " Periodic" : "",
+               fls_strings [(command >> 2) & 0x3],
+               (command & CMD_RESET) ? " Reset" : "",
+               (command & CMD_RUN) ? "RUN" : "HALT"
+               );
+}
+
+static int
+dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
+{
+       char    *sig;
+
+       /* signaling state */
+       switch (status & (3 << 10)) {
+       case 0 << 10: sig = "se0"; break;
+       case 1 << 10: sig = "k"; break;         /* low speed */
+       case 2 << 10: sig = "j"; break;
+       default: sig = "?"; break;
+       }
+
+       return scnprintf (buf, len,
+               "%s%sport:%d status %06x %d "
+               "sig=%s%s%s%s%s%s%s%s",
+               label, label [0] ? " " : "", port, status,
+               status>>25,/*device address */
+               sig,
+               (status & PORT_RESET) ? " RESET" : "",
+               (status & PORT_SUSPEND) ? " SUSPEND" : "",
+               (status & PORT_RESUME) ? " RESUME" : "",
+               (status & PORT_PEC) ? " PEC" : "",
+               (status & PORT_PE) ? " PE" : "",
+               (status & PORT_CSC) ? " CSC" : "",
+               (status & PORT_CONNECT) ? " CONNECT" : "");
+}
+
+#else
+static inline void __maybe_unused
+dbg_qh (char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{}
+
+static inline int __maybe_unused
+dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
+{ return 0; }
+
+static inline int __maybe_unused
+dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
+{ return 0; }
+
+static inline int __maybe_unused
+dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
+{ return 0; }
+
+static inline int __maybe_unused
+dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
+{ return 0; }
+
+#endif /* DEBUG */
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(fusbh200, label, status) { \
+       char _buf [80]; \
+       dbg_status_buf (_buf, sizeof _buf, label, status); \
+       fusbh200_dbg (fusbh200, "%s\n", _buf); \
+}
+
+#define dbg_cmd(fusbh200, label, command) { \
+       char _buf [80]; \
+       dbg_command_buf (_buf, sizeof _buf, label, command); \
+       fusbh200_dbg (fusbh200, "%s\n", _buf); \
+}
+
+#define dbg_port(fusbh200, label, port, status) { \
+       char _buf [80]; \
+       dbg_port_buf (_buf, sizeof _buf, label, port, status); \
+       fusbh200_dbg (fusbh200, "%s\n", _buf); \
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef STUB_DEBUG_FILES
+
+static inline void create_debug_files (struct fusbh200_hcd *bus) { }
+static inline void remove_debug_files (struct fusbh200_hcd *bus) { }
+
+#else
+
+/* troubleshooting help: expose state in debugfs */
+
+static int debug_async_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+static int debug_async_open(struct inode *, struct file *);
+
+static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+       .owner          = THIS_MODULE,
+       .open           = debug_async_open,
+       .read           = debug_output,
+       .release        = debug_close,
+       .llseek         = default_llseek,
+};
+static const struct file_operations debug_periodic_fops = {
+       .owner          = THIS_MODULE,
+       .open           = debug_periodic_open,
+       .read           = debug_output,
+       .release        = debug_close,
+       .llseek         = default_llseek,
+};
+static const struct file_operations debug_registers_fops = {
+       .owner          = THIS_MODULE,
+       .open           = debug_registers_open,
+       .read           = debug_output,
+       .release        = debug_close,
+       .llseek         = default_llseek,
+};
+
+static struct dentry *fusbh200_debug_root;
+
+struct debug_buffer {
+       ssize_t (*fill_func)(struct debug_buffer *);    /* fill method */
+       struct usb_bus *bus;
+       struct mutex mutex;     /* protect filling of buffer */
+       size_t count;           /* number of characters filled into buffer */
+       char *output_buf;
+       size_t alloc_size;
+};
+
+#define speed_char(info1) ({ char tmp; \
+               switch (info1 & (3 << 12)) { \
+               case QH_FULL_SPEED: tmp = 'f'; break; \
+               case QH_LOW_SPEED:  tmp = 'l'; break; \
+               case QH_HIGH_SPEED: tmp = 'h'; break; \
+               default: tmp = '?'; break; \
+               }; tmp; })
+
+static inline char token_mark(struct fusbh200_hcd *fusbh200, __hc32 token)
+{
+       __u32 v = hc32_to_cpu(fusbh200, token);
+
+       if (v & QTD_STS_ACTIVE)
+               return '*';
+       if (v & QTD_STS_HALT)
+               return '-';
+       if (!IS_SHORT_READ (v))
+               return ' ';
+       /* tries to advance through hw_alt_next */
+       return '/';
+}
+
+static void qh_lines (
+       struct fusbh200_hcd *fusbh200,
+       struct fusbh200_qh *qh,
+       char **nextp,
+       unsigned *sizep
+)
+{
+       u32                     scratch;
+       u32                     hw_curr;
+       struct fusbh200_qtd             *td;
+       unsigned                temp;
+       unsigned                size = *sizep;
+       char                    *next = *nextp;
+       char                    mark;
+       __le32                  list_end = FUSBH200_LIST_END(fusbh200);
+       struct fusbh200_qh_hw   *hw = qh->hw;
+
+       if (hw->hw_qtd_next == list_end)        /* NEC does this */
+               mark = '@';
+       else
+               mark = token_mark(fusbh200, hw->hw_token);
+       if (mark == '/') {      /* qh_alt_next controls qh advance? */
+               if ((hw->hw_alt_next & QTD_MASK(fusbh200))
+                               == fusbh200->async->hw->hw_alt_next)
+                       mark = '#';     /* blocked */
+               else if (hw->hw_alt_next == list_end)
+                       mark = '.';     /* use hw_qtd_next */
+               /* else alt_next points to some other qtd */
+       }
+       scratch = hc32_to_cpup(fusbh200, &hw->hw_info1);
+       hw_curr = (mark == '*') ? hc32_to_cpup(fusbh200, &hw->hw_current) : 0;
+       temp = scnprintf (next, size,
+                       "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
+                       qh, scratch & 0x007f,
+                       speed_char (scratch),
+                       (scratch >> 8) & 0x000f,
+                       scratch, hc32_to_cpup(fusbh200, &hw->hw_info2),
+                       hc32_to_cpup(fusbh200, &hw->hw_token), mark,
+                       (cpu_to_hc32(fusbh200, QTD_TOGGLE) & hw->hw_token)
+                               ? "data1" : "data0",
+                       (hc32_to_cpup(fusbh200, &hw->hw_alt_next) >> 1) & 0x0f);
+       size -= temp;
+       next += temp;
+
+       /* hc may be modifying the list as we read it ... */
+       list_for_each_entry(td, &qh->qtd_list, qtd_list) {
+               scratch = hc32_to_cpup(fusbh200, &td->hw_token);
+               mark = ' ';
+               if (hw_curr == td->qtd_dma)
+                       mark = '*';
+               else if (hw->hw_qtd_next == cpu_to_hc32(fusbh200, td->qtd_dma))
+                       mark = '+';
+               else if (QTD_LENGTH (scratch)) {
+                       if (td->hw_alt_next == fusbh200->async->hw->hw_alt_next)
+                               mark = '#';
+                       else if (td->hw_alt_next != list_end)
+                               mark = '/';
+               }
+               temp = snprintf (next, size,
+                               "\n\t%p%c%s len=%d %08x urb %p",
+                               td, mark, ({ char *tmp;
+                                switch ((scratch>>8)&0x03) {
+                                case 0: tmp = "out"; break;
+                                case 1: tmp = "in"; break;
+                                case 2: tmp = "setup"; break;
+                                default: tmp = "?"; break;
+                                } tmp;}),
+                               (scratch >> 16) & 0x7fff,
+                               scratch,
+                               td->urb);
+               if (size < temp)
+                       temp = size;
+               size -= temp;
+               next += temp;
+               if (temp == size)
+                       goto done;
+       }
+
+       temp = snprintf (next, size, "\n");
+       if (size < temp)
+               temp = size;
+       size -= temp;
+       next += temp;
+
+done:
+       *sizep = size;
+       *nextp = next;
+}
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+       struct usb_hcd          *hcd;
+       struct fusbh200_hcd     *fusbh200;
+       unsigned long           flags;
+       unsigned                temp, size;
+       char                    *next;
+       struct fusbh200_qh              *qh;
+
+       hcd = bus_to_hcd(buf->bus);
+       fusbh200 = hcd_to_fusbh200 (hcd);
+       next = buf->output_buf;
+       size = buf->alloc_size;
+
+       *next = 0;
+
+       /* dumps a snapshot of the async schedule.
+        * usually empty except for long-term bulk reads, or head.
+        * one QH per line, and TDs we know about
+        */
+       spin_lock_irqsave (&fusbh200->lock, flags);
+       for (qh = fusbh200->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
+               qh_lines (fusbh200, qh, &next, &size);
+       if (fusbh200->async_unlink && size > 0) {
+               temp = scnprintf(next, size, "\nunlink =\n");
+               size -= temp;
+               next += temp;
+
+               for (qh = fusbh200->async_unlink; size > 0 && qh;
+                               qh = qh->unlink_next)
+                       qh_lines (fusbh200, qh, &next, &size);
+       }
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+
+       return strlen(buf->output_buf);
+}
+
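+/* DBG_SCHED_LIMIT bounds the "seen" array used while walking the periodic
+ * schedule, so a QH shared by several frame slots is only expanded once
+ */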
+#define DBG_SCHED_LIMIT 64
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+       struct usb_hcd          *hcd;
+       struct fusbh200_hcd             *fusbh200;
+       unsigned long           flags;
+       union fusbh200_shadow   p, *seen;
+       unsigned                temp, size, seen_count;
+       char                    *next;
+       unsigned                i;
+       __hc32                  tag;
+
+       seen = kmalloc(DBG_SCHED_LIMIT * sizeof(*seen), GFP_ATOMIC);
+       if (!seen)
+               return 0;
+       seen_count = 0;
+
+       hcd = bus_to_hcd(buf->bus);
+       fusbh200 = hcd_to_fusbh200 (hcd);
+       next = buf->output_buf;
+       size = buf->alloc_size;
+
+       temp = scnprintf (next, size, "size = %d\n", fusbh200->periodic_size);
+       size -= temp;
+       next += temp;
+
+       /* dump a snapshot of the periodic schedule.
+        * iso changes, interrupt usually doesn't.
+        */
+       spin_lock_irqsave (&fusbh200->lock, flags);
+       for (i = 0; i < fusbh200->periodic_size; i++) {
+               p = fusbh200->pshadow [i];
+               if (likely (!p.ptr))
+                       continue;
+               tag = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [i]);
+
+               temp = scnprintf (next, size, "%4d: ", i);
+               size -= temp;
+               next += temp;
+
+               do {
+                       struct fusbh200_qh_hw *hw;
+
+                       switch (hc32_to_cpu(fusbh200, tag)) {
+                       case Q_TYPE_QH:
+                               hw = p.qh->hw;
+                               temp = scnprintf (next, size, " qh%d-%04x/%p",
+                                               p.qh->period,
+                                               hc32_to_cpup(fusbh200,
+                                                       &hw->hw_info2)
+                                                       /* uframe masks */
+                                                       & (QH_CMASK | QH_SMASK),
+                                               p.qh);
+                               size -= temp;
+                               next += temp;
+                               /* don't repeat what follows this qh */
+                               for (temp = 0; temp < seen_count; temp++) {
+                                       if (seen [temp].ptr != p.ptr)
+                                               continue;
+                                       if (p.qh->qh_next.ptr) {
+                                               temp = scnprintf (next, size,
+                                                       " ...");
+                                               size -= temp;
+                                               next += temp;
+                                       }
+                                       break;
+                               }
+                               /* show more info the first time around */
+                               if (temp == seen_count) {
+                                       u32     scratch = hc32_to_cpup(fusbh200,
+                                                       &hw->hw_info1);
+                                       struct fusbh200_qtd     *qtd;
+                                       char            *type = "";
+
+                                       /* count tds, get ep direction */
+                                       temp = 0;
+                                       list_for_each_entry (qtd,
+                                                       &p.qh->qtd_list,
+                                                       qtd_list) {
+                                               temp++;
+                                               switch (0x03 & (hc32_to_cpu(
+                                                       fusbh200,
+                                                       qtd->hw_token) >> 8)) {
+                                               case 0: type = "out"; continue;
+                                               case 1: type = "in"; continue;
+                                               }
+                                       }
+
+                                       temp = scnprintf (next, size,
+                                               " (%c%d ep%d%s "
+                                               "[%d/%d] q%d p%d)",
+                                               speed_char (scratch),
+                                               scratch & 0x007f,
+                                               (scratch >> 8) & 0x000f, type,
+                                               p.qh->usecs, p.qh->c_usecs,
+                                               temp,
+                                               0x7ff & (scratch >> 16));
+
+                                       if (seen_count < DBG_SCHED_LIMIT)
+                                               seen [seen_count++].qh = p.qh;
+                               } else
+                                       temp = 0;
+                               tag = Q_NEXT_TYPE(fusbh200, hw->hw_next);
+                               p = p.qh->qh_next;
+                               break;
+                       case Q_TYPE_FSTN:
+                               temp = scnprintf (next, size,
+                                       " fstn-%8x/%p", p.fstn->hw_prev,
+                                       p.fstn);
+                               tag = Q_NEXT_TYPE(fusbh200, p.fstn->hw_next);
+                               p = p.fstn->fstn_next;
+                               break;
+                       case Q_TYPE_ITD:
+                               temp = scnprintf (next, size,
+                                       " itd/%p", p.itd);
+                               tag = Q_NEXT_TYPE(fusbh200, p.itd->hw_next);
+                               p = p.itd->itd_next;
+                               break;
+                       }
+                       size -= temp;
+                       next += temp;
+               } while (p.ptr);
+
+               temp = scnprintf (next, size, "\n");
+               size -= temp;
+               next += temp;
+       }
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+       kfree (seen);
+
+       return buf->alloc_size - size;
+}
+#undef DBG_SCHED_LIMIT
+
+static const char *rh_state_string(struct fusbh200_hcd *fusbh200)
+{
+       switch (fusbh200->rh_state) {
+       case FUSBH200_RH_HALTED:
+               return "halted";
+       case FUSBH200_RH_SUSPENDED:
+               return "suspended";
+       case FUSBH200_RH_RUNNING:
+               return "running";
+       case FUSBH200_RH_STOPPING:
+               return "stopping";
+       }
+       return "?";
+}
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+       struct usb_hcd          *hcd;
+       struct fusbh200_hcd     *fusbh200;
+       unsigned long           flags;
+       unsigned                temp, size, i;
+       char                    *next, scratch [80];
+       static char             fmt [] = "%*s\n";
+       static char             label [] = "";
+
+       hcd = bus_to_hcd(buf->bus);
+       fusbh200 = hcd_to_fusbh200 (hcd);
+       next = buf->output_buf;
+       size = buf->alloc_size;
+
+       spin_lock_irqsave (&fusbh200->lock, flags);
+
+       if (!HCD_HW_ACCESSIBLE(hcd)) {
+               size = scnprintf (next, size,
+                       "bus %s, device %s\n"
+                       "%s\n"
+                       "SUSPENDED (no register access)\n",
+                       hcd->self.controller->bus->name,
+                       dev_name(hcd->self.controller),
+                       hcd->product_desc);
+               goto done;
+       }
+
+       /* Capability Registers */
+       i = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
+       temp = scnprintf (next, size,
+               "bus %s, device %s\n"
+               "%s\n"
+               "EHCI %x.%02x, rh state %s\n",
+               hcd->self.controller->bus->name,
+               dev_name(hcd->self.controller),
+               hcd->product_desc,
+               i >> 8, i & 0x0ff, rh_state_string(fusbh200));
+       size -= temp;
+       next += temp;
+
+       // FIXME interpret both types of params
+       i = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
+       temp = scnprintf (next, size, "structural params 0x%08x\n", i);
+       size -= temp;
+       next += temp;
+
+       i = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+       temp = scnprintf (next, size, "capability params 0x%08x\n", i);
+       size -= temp;
+       next += temp;
+
+       /* Operational Registers */
+       temp = dbg_status_buf (scratch, sizeof scratch, label,
+                       fusbh200_readl(fusbh200, &fusbh200->regs->status));
+       temp = scnprintf (next, size, fmt, temp, scratch);
+       size -= temp;
+       next += temp;
+
+       temp = dbg_command_buf (scratch, sizeof scratch, label,
+                       fusbh200_readl(fusbh200, &fusbh200->regs->command));
+       temp = scnprintf (next, size, fmt, temp, scratch);
+       size -= temp;
+       next += temp;
+
+       temp = dbg_intr_buf (scratch, sizeof scratch, label,
+                       fusbh200_readl(fusbh200, &fusbh200->regs->intr_enable));
+       temp = scnprintf (next, size, fmt, temp, scratch);
+       size -= temp;
+       next += temp;
+
+       temp = scnprintf (next, size, "uframe %04x\n",
+                       fusbh200_read_frame_index(fusbh200));
+       size -= temp;
+       next += temp;
+
+       if (fusbh200->async_unlink) {
+               temp = scnprintf(next, size, "async unlink qh %p\n",
+                               fusbh200->async_unlink);
+               size -= temp;
+               next += temp;
+       }
+
+#ifdef FUSBH200_STATS
+       temp = scnprintf (next, size,
+               "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+               fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
+               fusbh200->stats.lost_iaa);
+       size -= temp;
+       next += temp;
+
+       temp = scnprintf (next, size, "complete %ld unlink %ld\n",
+               fusbh200->stats.complete, fusbh200->stats.unlink);
+       size -= temp;
+       next += temp;
+#endif
+
+done:
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+
+       return buf->alloc_size - size;
+}
+
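+/* debugfs plumbing: alloc_buffer() records the bus and fill function at
+ * open time, fill_buffer() lazily vmalloc()s the output buffer and runs
+ * the fill function on the first read, and debug_close() frees both
+ */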
+static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
+                               ssize_t (*fill_func)(struct debug_buffer *))
+{
+       struct debug_buffer *buf;
+
+       buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+
+       if (buf) {
+               buf->bus = bus;
+               buf->fill_func = fill_func;
+               mutex_init(&buf->mutex);
+               buf->alloc_size = PAGE_SIZE;
+       }
+
+       return buf;
+}
+
+static int fill_buffer(struct debug_buffer *buf)
+{
+       int ret = 0;
+
+       if (!buf->output_buf)
+               buf->output_buf = vmalloc(buf->alloc_size);
+
+       if (!buf->output_buf) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = buf->fill_func(buf);
+
+       if (ret >= 0) {
+               buf->count = ret;
+               ret = 0;
+       }
+
+out:
+       return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+                           size_t len, loff_t *offset)
+{
+       struct debug_buffer *buf = file->private_data;
+       int ret = 0;
+
+       mutex_lock(&buf->mutex);
+       if (buf->count == 0) {
+               ret = fill_buffer(buf);
+               if (ret != 0) {
+                       mutex_unlock(&buf->mutex);
+                       goto out;
+               }
+       }
+       mutex_unlock(&buf->mutex);
+
+       ret = simple_read_from_buffer(user_buf, len, offset,
+                                     buf->output_buf, buf->count);
+
+out:
+       return ret;
+
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+       struct debug_buffer *buf = file->private_data;
+
+       if (buf) {
+               vfree(buf->output_buf);
+               kfree(buf);
+       }
+
+       return 0;
+}
+
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+       file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+       return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+       struct debug_buffer *buf;
+       buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
+       if (!buf)
+               return -ENOMEM;
+
+       buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
+       file->private_data = buf;
+       return 0;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+       file->private_data = alloc_buffer(inode->i_private,
+                                         fill_registers_buffer);
+
+       return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files (struct fusbh200_hcd *fusbh200)
+{
+       struct usb_bus *bus = &fusbh200_to_hcd(fusbh200)->self;
+
+       fusbh200->debug_dir = debugfs_create_dir(bus->bus_name, fusbh200_debug_root);
+       if (!fusbh200->debug_dir)
+               return;
+
+       if (!debugfs_create_file("async", S_IRUGO, fusbh200->debug_dir, bus,
+                                               &debug_async_fops))
+               goto file_error;
+
+       if (!debugfs_create_file("periodic", S_IRUGO, fusbh200->debug_dir, bus,
+                                               &debug_periodic_fops))
+               goto file_error;
+
+       if (!debugfs_create_file("registers", S_IRUGO, fusbh200->debug_dir, bus,
+                                                   &debug_registers_fops))
+               goto file_error;
+
+       return;
+
+file_error:
+       debugfs_remove_recursive(fusbh200->debug_dir);
+}
+
+static inline void remove_debug_files (struct fusbh200_hcd *fusbh200)
+{
+       debugfs_remove_recursive(fusbh200->debug_dir);
+}
+
+#endif /* STUB_DEBUG_FILES */
+/*-------------------------------------------------------------------------*/
+
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done).  There are two failure modes:  "usec" microseconds have
+ * passed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown:  shutting down the bridge before the devices using it.
+ */
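+/* For example, fusbh200_halt() below calls
+ *     handshake(fusbh200, &fusbh200->regs->status, STS_HALT, STS_HALT, 16 * 125)
+ * to wait up to 16 microframes (2 ms) for the controller to report halted.
+ */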
+static int handshake (struct fusbh200_hcd *fusbh200, void __iomem *ptr,
+                     u32 mask, u32 done, int usec)
+{
+       u32     result;
+
+       do {
+               result = fusbh200_readl(fusbh200, ptr);
+               if (result == ~(u32)0)          /* card removed */
+                       return -ENODEV;
+               result &= mask;
+               if (result == done)
+                       return 0;
+               udelay (1);
+               usec--;
+       } while (usec > 0);
+       return -ETIMEDOUT;
+}
+
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fusbh200_halt (struct fusbh200_hcd *fusbh200)
+{
+       u32     temp;
+
+       spin_lock_irq(&fusbh200->lock);
+
+       /* disable any irqs left enabled by previous code */
+       fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
+
+       /*
+        * This routine gets called during probe before fusbh200->command
+        * has been initialized, so we can't rely on its value.
+        */
+       fusbh200->command &= ~CMD_RUN;
+       temp = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+       temp &= ~(CMD_RUN | CMD_IAAD);
+       fusbh200_writel(fusbh200, temp, &fusbh200->regs->command);
+
+       spin_unlock_irq(&fusbh200->lock);
+       synchronize_irq(fusbh200_to_hcd(fusbh200)->irq);
+
+       return handshake(fusbh200, &fusbh200->regs->status,
+                         STS_HALT, STS_HALT, 16 * 125);
+}
+
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fusbh200_reset (struct fusbh200_hcd *fusbh200)
+{
+       int     retval;
+       u32     command = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+
+       /* If the EHCI debug controller is active, special care must be
+        * taken before and after a host controller reset */
+       if (fusbh200->debug && !dbgp_reset_prep(fusbh200_to_hcd(fusbh200)))
+               fusbh200->debug = NULL;
+
+       command |= CMD_RESET;
+       dbg_cmd (fusbh200, "reset", command);
+       fusbh200_writel(fusbh200, command, &fusbh200->regs->command);
+       fusbh200->rh_state = FUSBH200_RH_HALTED;
+       fusbh200->next_statechange = jiffies;
+       retval = handshake (fusbh200, &fusbh200->regs->command,
+                           CMD_RESET, 0, 250 * 1000);
+
+       if (retval)
+               return retval;
+
+       if (fusbh200->debug)
+               dbgp_external_startup(fusbh200_to_hcd(fusbh200));
+
+       fusbh200->port_c_suspend = fusbh200->suspended_ports =
+                       fusbh200->resuming_ports = 0;
+       return retval;
+}
+
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fusbh200_quiesce (struct fusbh200_hcd *fusbh200)
+{
+       u32     temp;
+
+       if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+               return;
+
+       /* wait for any schedule enables/disables to take effect;
+        * CMD_ASE/CMD_PSE shifted left by 10 line up with STS_ASS/STS_PSS,
+        * giving the status bits expected once the command bits settle
+        */
+       temp = (fusbh200->command << 10) & (STS_ASS | STS_PSS);
+       handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, temp, 16 * 125);
+
+       /* then disable anything that's still active */
+       spin_lock_irq(&fusbh200->lock);
+       fusbh200->command &= ~(CMD_ASE | CMD_PSE);
+       fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+       spin_unlock_irq(&fusbh200->lock);
+
+       /* hardware can take 16 microframes to turn off ... */
+       handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, 0, 16 * 125);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void end_unlink_async(struct fusbh200_hcd *fusbh200);
+static void unlink_empty_async(struct fusbh200_hcd *fusbh200);
+static void fusbh200_work(struct fusbh200_hcd *fusbh200);
+static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void fusbh200_set_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
+{
+       fusbh200->command |= bit;
+       fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+
+       /* unblock posted write */
+       fusbh200_readl(fusbh200, &fusbh200->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void fusbh200_clear_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
+{
+       fusbh200->command &= ~bit;
+       fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+
+       /* unblock posted write */
+       fusbh200_readl(fusbh200, &fusbh200->regs->command);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support...  Now using hrtimers.
+ *
+ * Lots of different events are triggered from fusbh200->hrtimer.  Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * fusbh200->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * fusbh200->next_hrtimer_event.  Whenever fusbh200->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay.  This doesn't
+ * matter, because none of the events are especially time-critical.  The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay.  In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
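+/* Example: if only FUSBH200_HRTIMER_POLL_ASS (1 ms) and
+ * FUSBH200_HRTIMER_IO_WATCHDOG (100 ms) are pending, next_hrtimer_event is
+ * POLL_ASS and the hrtimer is armed for its 1 ms timeout; when the timer
+ * routine runs it handles POLL_ASS and simply re-enables the not-yet-expired
+ * IO watchdog event.
+ */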
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum fusbh200_hrtimer_event in fusbh200.h.
+ */
+static unsigned event_delays_ns[] = {
+       1 * NSEC_PER_MSEC,      /* FUSBH200_HRTIMER_POLL_ASS */
+       1 * NSEC_PER_MSEC,      /* FUSBH200_HRTIMER_POLL_PSS */
+       1 * NSEC_PER_MSEC,      /* FUSBH200_HRTIMER_POLL_DEAD */
+       1125 * NSEC_PER_USEC,   /* FUSBH200_HRTIMER_UNLINK_INTR */
+       2 * NSEC_PER_MSEC,      /* FUSBH200_HRTIMER_FREE_ITDS */
+       6 * NSEC_PER_MSEC,      /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
+       10 * NSEC_PER_MSEC,     /* FUSBH200_HRTIMER_IAA_WATCHDOG */
+       10 * NSEC_PER_MSEC,     /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
+       15 * NSEC_PER_MSEC,     /* FUSBH200_HRTIMER_DISABLE_ASYNC */
+       100 * NSEC_PER_MSEC,    /* FUSBH200_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void fusbh200_enable_event(struct fusbh200_hcd *fusbh200, unsigned event,
+               bool resched)
+{
+       ktime_t         *timeout = &fusbh200->hr_timeouts[event];
+
+       if (resched)
+               *timeout = ktime_add(ktime_get(),
+                               ktime_set(0, event_delays_ns[event]));
+       fusbh200->enabled_hrtimer_events |= (1 << event);
+
+       /* Track only the lowest-numbered pending event */
+       if (event < fusbh200->next_hrtimer_event) {
+               fusbh200->next_hrtimer_event = event;
+               hrtimer_start_range_ns(&fusbh200->hrtimer, *timeout,
+                               NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+       }
+}
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void fusbh200_poll_ASS(struct fusbh200_hcd *fusbh200)
+{
+       unsigned        actual, want;
+
+       /* Don't enable anything if the controller isn't running (e.g., died) */
+       if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+               return;
+
+       want = (fusbh200->command & CMD_ASE) ? STS_ASS : 0;
+       actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_ASS;
+
+       if (want != actual) {
+
+               /* Poll again later, but give up after about 20 ms */
+               if (fusbh200->ASS_poll_count++ < 20) {
+                       fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_ASS, true);
+                       return;
+               }
+               fusbh200_dbg(fusbh200, "Waited too long for the async schedule status (%x/%x), giving up\n",
+                               want, actual);
+       }
+       fusbh200->ASS_poll_count = 0;
+
+       /* The status is up-to-date; restart or stop the schedule as needed */
+       if (want == 0) {        /* Stopped */
+               if (fusbh200->async_count > 0)
+                       fusbh200_set_command_bit(fusbh200, CMD_ASE);
+
+       } else {                /* Running */
+               if (fusbh200->async_count == 0) {
+
+                       /* Turn off the schedule after a while */
+                       fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_ASYNC,
+                                       true);
+               }
+       }
+}
+
+/* Turn off the async schedule after a brief delay */
+static void fusbh200_disable_ASE(struct fusbh200_hcd *fusbh200)
+{
+       fusbh200_clear_command_bit(fusbh200, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void fusbh200_poll_PSS(struct fusbh200_hcd *fusbh200)
+{
+       unsigned        actual, want;
+
+       /* Don't do anything if the controller isn't running (e.g., died) */
+       if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+               return;
+
+       want = (fusbh200->command & CMD_PSE) ? STS_PSS : 0;
+       actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_PSS;
+
+       if (want != actual) {
+
+               /* Poll again later, but give up after about 20 ms */
+               if (fusbh200->PSS_poll_count++ < 20) {
+                       fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_PSS, true);
+                       return;
+               }
+               fusbh200_dbg(fusbh200, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+                               want, actual);
+       }
+       fusbh200->PSS_poll_count = 0;
+
+       /* The status is up-to-date; restart or stop the schedule as needed */
+       if (want == 0) {        /* Stopped */
+               if (fusbh200->periodic_count > 0)
+                       fusbh200_set_command_bit(fusbh200, CMD_PSE);
+
+       } else {                /* Running */
+               if (fusbh200->periodic_count == 0) {
+
+                       /* Turn off the schedule after a while */
+                       fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_PERIODIC,
+                                       true);
+               }
+       }
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void fusbh200_disable_PSE(struct fusbh200_hcd *fusbh200)
+{
+       fusbh200_clear_command_bit(fusbh200, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void fusbh200_handle_controller_death(struct fusbh200_hcd *fusbh200)
+{
+       if (!(fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_HALT)) {
+
+               /* Give up after a few milliseconds */
+               if (fusbh200->died_poll_count++ < 5) {
+                       /* Try again later */
+                       fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_DEAD, true);
+                       return;
+               }
+               fusbh200_warn(fusbh200, "Waited too long for the controller to stop, giving up\n");
+       }
+
+       /* Clean up the mess */
+       fusbh200->rh_state = FUSBH200_RH_HALTED;
+       fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
+       fusbh200_work(fusbh200);
+       end_unlink_async(fusbh200);
+
+       /* Not in process context, so don't try to reset the controller */
+}
+
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void fusbh200_handle_intr_unlinks(struct fusbh200_hcd *fusbh200)
+{
+       bool            stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
+
+       /*
+        * Process all the QHs on the intr_unlink list that were added
+        * before the current unlink cycle began.  The list is in
+        * temporal order, so stop when we reach the first entry in the
+        * current cycle.  But if the root hub isn't running then
+        * process all the QHs on the list.
+        */
+       fusbh200->intr_unlinking = true;
+       while (fusbh200->intr_unlink) {
+               struct fusbh200_qh      *qh = fusbh200->intr_unlink;
+
+               if (!stopped && qh->unlink_cycle == fusbh200->intr_unlink_cycle)
+                       break;
+               fusbh200->intr_unlink = qh->unlink_next;
+               qh->unlink_next = NULL;
+               end_unlink_intr(fusbh200, qh);
+       }
+
+       /* Handle remaining entries later */
+       if (fusbh200->intr_unlink) {
+               fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
+               ++fusbh200->intr_unlink_cycle;
+       }
+       fusbh200->intr_unlinking = false;
+}
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct fusbh200_hcd *fusbh200)
+{
+       if (!(fusbh200->enabled_hrtimer_events & BIT(FUSBH200_HRTIMER_FREE_ITDS))) {
+               fusbh200->last_itd_to_free = list_entry(
+                               fusbh200->cached_itd_list.prev,
+                               struct fusbh200_itd, itd_list);
+               fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_FREE_ITDS, true);
+       }
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct fusbh200_hcd *fusbh200)
+{
+       struct fusbh200_itd             *itd, *n;
+
+       if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+               fusbh200->last_itd_to_free = NULL;
+
+       list_for_each_entry_safe(itd, n, &fusbh200->cached_itd_list, itd_list) {
+               list_del(&itd->itd_list);
+               dma_pool_free(fusbh200->itd_pool, itd, itd->itd_dma);
+               if (itd == fusbh200->last_itd_to_free)
+                       break;
+       }
+
+       if (!list_empty(&fusbh200->cached_itd_list))
+               start_free_itds(fusbh200);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void fusbh200_iaa_watchdog(struct fusbh200_hcd *fusbh200)
+{
+       if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
+               return;
+
+       /*
+        * Lost IAA irqs wedge things badly; seen first with a vt8235.
+        * So we need this watchdog, but must protect it against both
+        * (a) SMP races against real IAA firing and retriggering, and
+        * (b) clean HC shutdown, when IAA watchdog was pending.
+        */
+       if (fusbh200->async_iaa) {
+               u32 cmd, status;
+
+               /* If we get here, IAA is *REALLY* late.  It's barely
+                * conceivable that the system is so busy that CMD_IAAD
+                * is still legitimately set, so let's be sure it's
+                * clear before we read STS_IAA.  (The HC should clear
+                * CMD_IAAD when it sets STS_IAA.)
+                */
+               cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+
+               /*
+                * If IAA is set here it either legitimately triggered
+                * after the watchdog timer expired (_way_ late, so we'll
+                * still count it as lost) ... or a silicon erratum:
+                * - VIA seems to set IAA without triggering the IRQ;
+                * - IAAD potentially cleared without setting IAA.
+                */
+               status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
+               if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+                       COUNT(fusbh200->stats.lost_iaa);
+                       fusbh200_writel(fusbh200, STS_IAA, &fusbh200->regs->status);
+               }
+
+               fusbh200_vdbg(fusbh200, "IAA watchdog: status %x cmd %x\n",
+                               status, cmd);
+               end_unlink_async(fusbh200);
+       }
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct fusbh200_hcd *fusbh200)
+{
+       /* Not needed if the controller isn't running or it's already enabled */
+       if (fusbh200->rh_state != FUSBH200_RH_RUNNING ||
+                       (fusbh200->enabled_hrtimer_events &
+                               BIT(FUSBH200_HRTIMER_IO_WATCHDOG)))
+               return;
+
+       /*
+        * Isochronous transfers always need the watchdog.
+        * For other sorts we use it only if the flag is set.
+        */
+       if (fusbh200->isoc_count > 0 || (fusbh200->need_io_watchdog &&
+                       fusbh200->async_count + fusbh200->intr_count > 0))
+               fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IO_WATCHDOG, true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum fusbh200_hrtimer_event in fusbh200.h.
+ */
+static void (*event_handlers[])(struct fusbh200_hcd *) = {
+       fusbh200_poll_ASS,                      /* FUSBH200_HRTIMER_POLL_ASS */
+       fusbh200_poll_PSS,                      /* FUSBH200_HRTIMER_POLL_PSS */
+       fusbh200_handle_controller_death,       /* FUSBH200_HRTIMER_POLL_DEAD */
+       fusbh200_handle_intr_unlinks,   /* FUSBH200_HRTIMER_UNLINK_INTR */
+       end_free_itds,                  /* FUSBH200_HRTIMER_FREE_ITDS */
+       unlink_empty_async,             /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
+       fusbh200_iaa_watchdog,          /* FUSBH200_HRTIMER_IAA_WATCHDOG */
+       fusbh200_disable_PSE,           /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
+       fusbh200_disable_ASE,           /* FUSBH200_HRTIMER_DISABLE_ASYNC */
+       fusbh200_work,                  /* FUSBH200_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart fusbh200_hrtimer_func(struct hrtimer *t)
+{
+       struct fusbh200_hcd     *fusbh200 = container_of(t, struct fusbh200_hcd, hrtimer);
+       ktime_t         now;
+       unsigned long   events;
+       unsigned long   flags;
+       unsigned        e;
+
+       spin_lock_irqsave(&fusbh200->lock, flags);
+
+       events = fusbh200->enabled_hrtimer_events;
+       fusbh200->enabled_hrtimer_events = 0;
+       fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
+
+       /*
+        * Check each pending event.  If its time has expired, handle
+        * the event; otherwise re-enable it.
+        */
+       now = ktime_get();
+       for_each_set_bit(e, &events, FUSBH200_HRTIMER_NUM_EVENTS) {
+               if (now.tv64 >= fusbh200->hr_timeouts[e].tv64)
+                       event_handlers[e](fusbh200);
+               else
+                       fusbh200_enable_event(fusbh200, e, false);
+       }
+
+       spin_unlock_irqrestore(&fusbh200->lock, flags);
+       return HRTIMER_NORESTART;
+}
+
+/*-------------------------------------------------------------------------*/
+
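+/* root hub bus suspend/resume is not implemented for this controller, so the
+ * corresponding hc_driver methods are simply left NULL
+ */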
+#define fusbh200_bus_suspend   NULL
+#define fusbh200_bus_resume    NULL
+
+/*-------------------------------------------------------------------------*/
+
+static int check_reset_complete (
+       struct fusbh200_hcd     *fusbh200,
+       int             index,
+       u32 __iomem     *status_reg,
+       int             port_status
+) {
+       if (!(port_status & PORT_CONNECT))
+               return port_status;
+
+       /* if reset finished and it's still not enabled -- handoff */
+       if (!(port_status & PORT_PE)) {
+               /* with integrated TT, there's nobody to hand it to! */
+               fusbh200_dbg (fusbh200,
+                       "Failed to enable port %d on root hub TT\n",
+                       index+1);
+               return port_status;
+       } else {
+               fusbh200_dbg(fusbh200, "port %d reset complete, port enabled\n",
+                       index + 1);
+       }
+
+       return port_status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+static int
+fusbh200_hub_status_data (struct usb_hcd *hcd, char *buf)
+{
+       struct fusbh200_hcd     *fusbh200 = hcd_to_fusbh200 (hcd);
+       u32             temp, status;
+       u32             mask;
+       int             retval = 1;
+       unsigned long   flags;
+
+       /* init status to no-changes */
+       buf [0] = 0;
+
+       /* Inform the core about resumes-in-progress by returning
+        * a non-zero value even if there are no status changes.
+        */
+       status = fusbh200->resuming_ports;
+
+       mask = PORT_CSC | PORT_PEC;
+       // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
+
+       /* no hub change reports (bit 0) for now (power, ...) */
+
+       /* port N changes (bit N)? */
+       spin_lock_irqsave (&fusbh200->lock, flags);
+
+       temp = fusbh200_readl(fusbh200, &fusbh200->regs->port_status);
+
+       /*
+        * Return status information even for ports with OWNER set.
+        * Otherwise khubd wouldn't see the disconnect event when a
+        * high-speed device is switched over to the companion
+        * controller by the user.
+        */
+
+       if ((temp & mask) != 0 || test_bit(0, &fusbh200->port_c_suspend)
+                       || (fusbh200->reset_done[0] && time_after_eq(
+                               jiffies, fusbh200->reset_done[0]))) {
+               buf [0] |= 1 << 1;
+               status = STS_PCD;
+       }
+       /* FIXME autosuspend idle root hubs */
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+       return status ? retval : 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+fusbh200_hub_descriptor (
+       struct fusbh200_hcd             *fusbh200,
+       struct usb_hub_descriptor       *desc
+) {
+       int             ports = HCS_N_PORTS (fusbh200->hcs_params);
+       u16             temp;
+
+       desc->bDescriptorType = 0x29;
+       desc->bPwrOn2PwrGood = 10;      /* EHCI 1.0, 2.3.9 says 20ms max */
+       desc->bHubContrCurrent = 0;
+
+       desc->bNbrPorts = ports;
+       temp = 1 + (ports / 8);
+       desc->bDescLength = 7 + 2 * temp;
+
+       /* two bitmaps:  ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+       memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+       memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+       temp = 0x0008;          /* per-port overcurrent reporting */
+       temp |= 0x0002;         /* no power switching */
+       desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int fusbh200_hub_control (
+       struct usb_hcd  *hcd,
+       u16             typeReq,
+       u16             wValue,
+       u16             wIndex,
+       char            *buf,
+       u16             wLength
+) {
+       struct fusbh200_hcd     *fusbh200 = hcd_to_fusbh200 (hcd);
+       int             ports = HCS_N_PORTS (fusbh200->hcs_params);
+       u32 __iomem     *status_reg = &fusbh200->regs->port_status;
+       u32             temp, temp1, status;
+       unsigned long   flags;
+       int             retval = 0;
+       unsigned        selector;
+
+       /*
+        * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+        * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+        * (track current state ourselves) ... blink for diagnostics,
+        * power, "this is the one", etc.  EHCI spec supports this.
+        */
+
+       spin_lock_irqsave (&fusbh200->lock, flags);
+       switch (typeReq) {
+       case ClearHubFeature:
+               switch (wValue) {
+               case C_HUB_LOCAL_POWER:
+               case C_HUB_OVER_CURRENT:
+                       /* no hub-wide feature/status flags */
+                       break;
+               default:
+                       goto error;
+               }
+               break;
+       case ClearPortFeature:
+               if (!wIndex || wIndex > ports)
+                       goto error;
+               wIndex--;
+               temp = fusbh200_readl(fusbh200, status_reg);
+               temp &= ~PORT_RWC_BITS;
+
+               /*
+                * Even if OWNER is set, so the port is owned by the
+                * companion controller, khubd needs to be able to clear
+                * the port-change status bits (especially
+                * USB_PORT_STAT_C_CONNECTION).
+                */
+
+               switch (wValue) {
+               case USB_PORT_FEAT_ENABLE:
+                       fusbh200_writel(fusbh200, temp & ~PORT_PE, status_reg);
+                       break;
+               case USB_PORT_FEAT_C_ENABLE:
+                       fusbh200_writel(fusbh200, temp | PORT_PEC, status_reg);
+                       break;
+               case USB_PORT_FEAT_SUSPEND:
+                       if (temp & PORT_RESET)
+                               goto error;
+                       if (!(temp & PORT_SUSPEND))
+                               break;
+                       if ((temp & PORT_PE) == 0)
+                               goto error;
+
+                       /* resume signaling for 20 msec */
+                       fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
+                       fusbh200->reset_done[wIndex] = jiffies
+                                       + msecs_to_jiffies(20);
+                       break;
+               case USB_PORT_FEAT_C_SUSPEND:
+                       clear_bit(wIndex, &fusbh200->port_c_suspend);
+                       break;
+               case USB_PORT_FEAT_C_CONNECTION:
+                       fusbh200_writel(fusbh200, temp | PORT_CSC, status_reg);
+                       break;
+               case USB_PORT_FEAT_C_OVER_CURRENT:
+                       fusbh200_writel(fusbh200, temp | BMISR_OVC, &fusbh200->regs->bmisr);
+                       break;
+               case USB_PORT_FEAT_C_RESET:
+                       /* GetPortStatus clears reset */
+                       break;
+               default:
+                       goto error;
+               }
+               fusbh200_readl(fusbh200, &fusbh200->regs->command);     /* unblock posted write */
+               break;
+       case GetHubDescriptor:
+               fusbh200_hub_descriptor (fusbh200, (struct usb_hub_descriptor *)
+                       buf);
+               break;
+       case GetHubStatus:
+               /* no hub-wide feature/status flags */
+               memset (buf, 0, 4);
+               //cpu_to_le32s ((u32 *) buf);
+               break;
+       case GetPortStatus:
+               if (!wIndex || wIndex > ports)
+                       goto error;
+               wIndex--;
+               status = 0;
+               temp = fusbh200_readl(fusbh200, status_reg);
+
+               // wPortChange bits
+               if (temp & PORT_CSC)
+                       status |= USB_PORT_STAT_C_CONNECTION << 16;
+               if (temp & PORT_PEC)
+                       status |= USB_PORT_STAT_C_ENABLE << 16;
+
+               temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
+               if (temp1 & BMISR_OVC)
+                       status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+               /* whoever resumes must GetPortStatus to complete it!! */
+               if (temp & PORT_RESUME) {
+
+                       /* Remote Wakeup received? */
+                       if (!fusbh200->reset_done[wIndex]) {
+                               /* resume signaling for 20 msec */
+                               fusbh200->reset_done[wIndex] = jiffies
+                                               + msecs_to_jiffies(20);
+                               /* check the port again */
+                               mod_timer(&fusbh200_to_hcd(fusbh200)->rh_timer,
+                                               fusbh200->reset_done[wIndex]);
+                       }
+
+                       /* resume completed? */
+                       else if (time_after_eq(jiffies,
+                                       fusbh200->reset_done[wIndex])) {
+                               clear_bit(wIndex, &fusbh200->suspended_ports);
+                               set_bit(wIndex, &fusbh200->port_c_suspend);
+                               fusbh200->reset_done[wIndex] = 0;
+
+                               /* stop resume signaling */
+                               temp = fusbh200_readl(fusbh200, status_reg);
+                               fusbh200_writel(fusbh200,
+                                       temp & ~(PORT_RWC_BITS | PORT_RESUME),
+                                       status_reg);
+                               clear_bit(wIndex, &fusbh200->resuming_ports);
+                               retval = handshake(fusbh200, status_reg,
+                                          PORT_RESUME, 0, 2000 /* 2msec */);
+                               if (retval != 0) {
+                                       fusbh200_err(fusbh200,
+                                               "port %d resume error %d\n",
+                                               wIndex + 1, retval);
+                                       goto error;
+                               }
+                               temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+                       }
+               }
+
+               /* whoever resets must GetPortStatus to complete it!! */
+               if ((temp & PORT_RESET)
+                               && time_after_eq(jiffies,
+                                       fusbh200->reset_done[wIndex])) {
+                       status |= USB_PORT_STAT_C_RESET << 16;
+                       fusbh200->reset_done [wIndex] = 0;
+                       clear_bit(wIndex, &fusbh200->resuming_ports);
+
+                       /* force reset to complete */
+                       fusbh200_writel(fusbh200, temp & ~(PORT_RWC_BITS | PORT_RESET),
+                                       status_reg);
+                       /* REVISIT:  some hardware needs 550+ usec to clear
+                        * this bit; seems too long to spin routinely...
+                        */
+                       retval = handshake(fusbh200, status_reg,
+                                       PORT_RESET, 0, 1000);
+                       if (retval != 0) {
+                               fusbh200_err (fusbh200, "port %d reset error %d\n",
+                                       wIndex + 1, retval);
+                               goto error;
+                       }
+
+                       /* see what we found out */
+                       temp = check_reset_complete (fusbh200, wIndex, status_reg,
+                                       fusbh200_readl(fusbh200, status_reg));
+               }
+
+               if (!(temp & (PORT_RESUME|PORT_RESET))) {
+                       fusbh200->reset_done[wIndex] = 0;
+                       clear_bit(wIndex, &fusbh200->resuming_ports);
+               }
+
+               /* transfer dedicated ports to the companion hc */
+               if ((temp & PORT_CONNECT) &&
+                               test_bit(wIndex, &fusbh200->companion_ports)) {
+                       temp &= ~PORT_RWC_BITS;
+                       fusbh200_writel(fusbh200, temp, status_reg);
+                       fusbh200_dbg(fusbh200, "port %d --> companion\n", wIndex + 1);
+                       temp = fusbh200_readl(fusbh200, status_reg);
+               }
+
+               /*
+                * Even if OWNER is set, there's no harm letting khubd
+                * see the wPortStatus values (they should all be 0 except
+                * for PORT_POWER anyway).
+                */
+
+               if (temp & PORT_CONNECT) {
+                       status |= USB_PORT_STAT_CONNECTION;
+                       status |= fusbh200_port_speed(fusbh200, temp);
+               }
+               if (temp & PORT_PE)
+                       status |= USB_PORT_STAT_ENABLE;
+
+               /* maybe the port was unsuspended without our knowledge */
+               if (temp & (PORT_SUSPEND|PORT_RESUME)) {
+                       status |= USB_PORT_STAT_SUSPEND;
+               } else if (test_bit(wIndex, &fusbh200->suspended_ports)) {
+                       clear_bit(wIndex, &fusbh200->suspended_ports);
+                       clear_bit(wIndex, &fusbh200->resuming_ports);
+                       fusbh200->reset_done[wIndex] = 0;
+                       if (temp & PORT_PE)
+                               set_bit(wIndex, &fusbh200->port_c_suspend);
+               }
+
+               temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
+               if (temp1 & BMISR_OVC)
+                       status |= USB_PORT_STAT_OVERCURRENT;
+               if (temp & PORT_RESET)
+                       status |= USB_PORT_STAT_RESET;
+               if (test_bit(wIndex, &fusbh200->port_c_suspend))
+                       status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+#ifndef        VERBOSE_DEBUG
+       if (status & ~0xffff)   /* only if wPortChange is interesting */
+#endif
+               dbg_port (fusbh200, "GetStatus", wIndex + 1, temp);
+               put_unaligned_le32(status, buf);
+               break;
+       case SetHubFeature:
+               switch (wValue) {
+               case C_HUB_LOCAL_POWER:
+               case C_HUB_OVER_CURRENT:
+                       /* no hub-wide feature/status flags */
+                       break;
+               default:
+                       goto error;
+               }
+               break;
+       case SetPortFeature:
+               selector = wIndex >> 8;
+               wIndex &= 0xff;
+
+               if (!wIndex || wIndex > ports)
+                       goto error;
+               wIndex--;
+               temp = fusbh200_readl(fusbh200, status_reg);
+               temp &= ~PORT_RWC_BITS;
+               switch (wValue) {
+               case USB_PORT_FEAT_SUSPEND:
+                       if ((temp & PORT_PE) == 0
+                                       || (temp & PORT_RESET) != 0)
+                               goto error;
+
+                       /* After the above check the port must be connected.
+                        * Setting the suspend bit could also put the phy into
+                        * low-power mode if the controller had a hostpc feature.
+                        */
+                       fusbh200_writel(fusbh200, temp | PORT_SUSPEND, status_reg);
+                       set_bit(wIndex, &fusbh200->suspended_ports);
+                       break;
+               case USB_PORT_FEAT_RESET:
+                       if (temp & PORT_RESUME)
+                               goto error;
+                       /* line status bits may report this as low speed,
+                        * which can be fine if this root hub has a
+                        * transaction translator built in.
+                        */
+                       fusbh200_vdbg (fusbh200, "port %d reset\n", wIndex + 1);
+                       temp |= PORT_RESET;
+                       temp &= ~PORT_PE;
+
+                       /*
+                        * caller must wait, then call GetPortStatus
+                        * usb 2.0 spec says 50 ms resets on root
+                        */
+                       fusbh200->reset_done [wIndex] = jiffies
+                                       + msecs_to_jiffies (50);
+                       fusbh200_writel(fusbh200, temp, status_reg);
+                       break;
+
+               /* For downstream facing ports (these):  one hub port is put
+                * into test mode according to USB2 11.24.2.13, then the hub
+                * must be reset (which for root hub now means rmmod+modprobe,
+                * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
+                * about the EHCI-specific stuff.
+                */
+               case USB_PORT_FEAT_TEST:
+                       if (!selector || selector > 5)
+                               goto error;
+                       spin_unlock_irqrestore(&fusbh200->lock, flags);
+                       fusbh200_quiesce(fusbh200);
+                       spin_lock_irqsave(&fusbh200->lock, flags);
+
+                       /* Put all enabled ports into suspend */
+                       temp = fusbh200_readl(fusbh200, status_reg) & ~PORT_RWC_BITS;
+                       if (temp & PORT_PE)
+                               fusbh200_writel(fusbh200, temp | PORT_SUSPEND,
+                                               status_reg);
+
+                       spin_unlock_irqrestore(&fusbh200->lock, flags);
+                       fusbh200_halt(fusbh200);
+                       spin_lock_irqsave(&fusbh200->lock, flags);
+
+                       temp = fusbh200_readl(fusbh200, status_reg);
+                       temp |= selector << 16;
+                       fusbh200_writel(fusbh200, temp, status_reg);
+                       break;
+
+               default:
+                       goto error;
+               }
+               fusbh200_readl(fusbh200, &fusbh200->regs->command);     /* unblock posted writes */
+               break;
+
+       default:
+error:
+               /* "stall" on error */
+               retval = -EPIPE;
+       }
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+       return retval;
+}
+
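+/* this root hub has no companion controller, so relinquishing a port is a
+ * no-op and "handed over" is always false
+ */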
+static void __maybe_unused fusbh200_relinquish_port(struct usb_hcd *hcd,
+               int portnum)
+{
+       return;
+}
+
+static int __maybe_unused fusbh200_port_handed_over(struct usb_hcd *hcd,
+               int portnum)
+{
+       return 0;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * There are basically three types of memory:
+ *     - data used only by the HCD ... kmalloc is fine
+ *     - async and periodic schedules, shared by HC and HCD ... these
+ *       need to use dma_pool or dma_alloc_coherent
+ *     - driver buffers, read/written by HC ... single shot DMA mapped
+ *
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
+ * No memory seen by this driver is pageable.
+ */
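+/* In this driver: qtd_pool, qh_pool and itd_pool are dma_pools, the periodic
+ * frame list is dma_alloc_coherent() memory, and the pshadow table (seen only
+ * by the HCD) is ordinary kernel memory, as fusbh200_mem_cleanup() shows.
+ */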
+
+/*-------------------------------------------------------------------------*/
+
+/* Allocate the key transfer structures from the previously allocated pool */
+
+static inline void fusbh200_qtd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd,
+                                 dma_addr_t dma)
+{
+       memset (qtd, 0, sizeof *qtd);
+       qtd->qtd_dma = dma;
+       qtd->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
+       qtd->hw_next = FUSBH200_LIST_END(fusbh200);
+       qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
+       INIT_LIST_HEAD (&qtd->qtd_list);
+}
+
+static struct fusbh200_qtd *fusbh200_qtd_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
+{
+       struct fusbh200_qtd             *qtd;
+       dma_addr_t              dma;
+
+       qtd = dma_pool_alloc (fusbh200->qtd_pool, flags, &dma);
+       if (qtd != NULL) {
+               fusbh200_qtd_init(fusbh200, qtd, dma);
+       }
+       return qtd;
+}
+
+static inline void fusbh200_qtd_free (struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
+{
+       dma_pool_free (fusbh200->qtd_pool, qtd, qtd->qtd_dma);
+}
+
+
+static void qh_destroy(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       /* clean qtds first, and know this is not linked */
+       if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
+               fusbh200_dbg (fusbh200, "unused qh not empty!\n");
+               BUG ();
+       }
+       if (qh->dummy)
+               fusbh200_qtd_free (fusbh200, qh->dummy);
+       dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
+       kfree(qh);
+}
+
+static struct fusbh200_qh *fusbh200_qh_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
+{
+       struct fusbh200_qh              *qh;
+       dma_addr_t              dma;
+
+       qh = kzalloc(sizeof *qh, GFP_ATOMIC);
+       if (!qh)
+               goto done;
+       qh->hw = (struct fusbh200_qh_hw *)
+               dma_pool_alloc(fusbh200->qh_pool, flags, &dma);
+       if (!qh->hw)
+               goto fail;
+       memset(qh->hw, 0, sizeof *qh->hw);
+       qh->qh_dma = dma;
+       // INIT_LIST_HEAD (&qh->qh_list);
+       INIT_LIST_HEAD (&qh->qtd_list);
+
+       /* dummy td enables safe urb queuing */
+       qh->dummy = fusbh200_qtd_alloc (fusbh200, flags);
+       if (qh->dummy == NULL) {
+               fusbh200_dbg (fusbh200, "no dummy td\n");
+               goto fail1;
+       }
+done:
+       return qh;
+fail1:
+       dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
+fail:
+       kfree(qh);
+       return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+
+static void fusbh200_mem_cleanup (struct fusbh200_hcd *fusbh200)
+{
+       if (fusbh200->async)
+               qh_destroy(fusbh200, fusbh200->async);
+       fusbh200->async = NULL;
+
+       if (fusbh200->dummy)
+               qh_destroy(fusbh200, fusbh200->dummy);
+       fusbh200->dummy = NULL;
+
+       /* DMA consistent memory and pools */
+       if (fusbh200->qtd_pool)
+               dma_pool_destroy (fusbh200->qtd_pool);
+       fusbh200->qtd_pool = NULL;
+
+       if (fusbh200->qh_pool) {
+               dma_pool_destroy (fusbh200->qh_pool);
+               fusbh200->qh_pool = NULL;
+       }
+
+       if (fusbh200->itd_pool)
+               dma_pool_destroy (fusbh200->itd_pool);
+       fusbh200->itd_pool = NULL;
+
+       if (fusbh200->periodic)
+               dma_free_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
+                       fusbh200->periodic_size * sizeof (u32),
+                       fusbh200->periodic, fusbh200->periodic_dma);
+       fusbh200->periodic = NULL;
+
+       /* shadow periodic table */
+       kfree(fusbh200->pshadow);
+       fusbh200->pshadow = NULL;
+}
+
+/* remember to add cleanup code (above) if you add anything here */
+static int fusbh200_mem_init (struct fusbh200_hcd *fusbh200, gfp_t flags)
+{
+       int i;
+
+       /* QTDs for control/bulk/intr transfers */
+       fusbh200->qtd_pool = dma_pool_create ("fusbh200_qtd",
+                       fusbh200_to_hcd(fusbh200)->self.controller,
+                       sizeof (struct fusbh200_qtd),
+                       32 /* byte alignment (for hw parts) */,
+                       4096 /* can't cross 4K */);
+       if (!fusbh200->qtd_pool) {
+               goto fail;
+       }
+
+       /* QHs for control/bulk/intr transfers */
+       fusbh200->qh_pool = dma_pool_create ("fusbh200_qh",
+                       fusbh200_to_hcd(fusbh200)->self.controller,
+                       sizeof(struct fusbh200_qh_hw),
+                       32 /* byte alignment (for hw parts) */,
+                       4096 /* can't cross 4K */);
+       if (!fusbh200->qh_pool) {
+               goto fail;
+       }
+       fusbh200->async = fusbh200_qh_alloc (fusbh200, flags);
+       if (!fusbh200->async) {
+               goto fail;
+       }
+
+       /* ITD for high speed ISO transfers */
+       fusbh200->itd_pool = dma_pool_create ("fusbh200_itd",
+                       fusbh200_to_hcd(fusbh200)->self.controller,
+                       sizeof (struct fusbh200_itd),
+                       64 /* byte alignment (for hw parts) */,
+                       4096 /* can't cross 4K */);
+       if (!fusbh200->itd_pool) {
+               goto fail;
+       }
+
+       /* Hardware periodic table */
+       fusbh200->periodic = (__le32 *)
+               dma_alloc_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
+                       fusbh200->periodic_size * sizeof(__le32),
+                       &fusbh200->periodic_dma, 0);
+       if (fusbh200->periodic == NULL) {
+               goto fail;
+       }
+
+       for (i = 0; i < fusbh200->periodic_size; i++)
+               fusbh200->periodic[i] = FUSBH200_LIST_END(fusbh200);
+
+       /* software shadow of hardware table */
+       fusbh200->pshadow = kcalloc(fusbh200->periodic_size, sizeof(void *), flags);
+       if (fusbh200->pshadow != NULL)
+               return 0;
+
+fail:
+       fusbh200_dbg (fusbh200, "couldn't init memory\n");
+       fusbh200_mem_cleanup (fusbh200);
+       return -ENOMEM;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
+ *
+ * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
+ * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
+ * buffers needed for the larger number).  We use one QH per endpoint, queue
+ * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
+ *
+ * ISO traffic uses "ISO TD" (itd) records, and (along with
+ * interrupts) needs careful scheduling.  Performance improvements can be
+ * an ongoing challenge.  That's in "ehci-sched.c".
+ *
+ * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
+ * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
+ * (b) special fields in qh entries or (c) split iso entries.  TTs will
+ * buffer low/full speed data so the host collects it at high speed.
+ */
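+
+/* Rough sketch of the async side built by the code below (illustrative only):
+ *
+ *   fusbh200->async (head QH, hardware links form a ring)
+ *        -> QH (one per endpoint) -> qtd -> qtd -> ... -> dummy qtd
+ *        -> QH ...
+ */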
+
+/*-------------------------------------------------------------------------*/
+
+/* fill a qtd, returning how much of the buffer we were able to queue up */
+
+static int
+qtd_fill(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd, dma_addr_t buf,
+                 size_t len, int token, int maxpacket)
+{
+       int     i, count;
+       u64     addr = buf;
+
+       /* one buffer entry per 4K ... first might be short or unaligned */
+       qtd->hw_buf[0] = cpu_to_hc32(fusbh200, (u32)addr);
+       qtd->hw_buf_hi[0] = cpu_to_hc32(fusbh200, (u32)(addr >> 32));
+       count = 0x1000 - (buf & 0x0fff);        /* rest of that page */
+       if (likely (len < count))               /* ... iff needed */
+               count = len;
+       else {
+               buf +=  0x1000;
+               buf &= ~0x0fff;
+
+               /* per-qtd limit: from 16K to 20K (best alignment) */
+               for (i = 1; count < len && i < 5; i++) {
+                       addr = buf;
+                       qtd->hw_buf[i] = cpu_to_hc32(fusbh200, (u32)addr);
+                       qtd->hw_buf_hi[i] = cpu_to_hc32(fusbh200,
+                                       (u32)(addr >> 32));
+                       buf += 0x1000;
+                       if ((count + 0x1000) < len)
+                               count += 0x1000;
+                       else
+                               count = len;
+               }
+
+               /* short packets may only terminate transfers */
+               if (count != len)
+                       count -= (count % maxpacket);
+       }
+       qtd->hw_token = cpu_to_hc32(fusbh200, (count << 16) | token);
+       qtd->length = count;
+
+       return count;
+}
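+
+/* Worked example (illustrative only): buf ending in 0x800, len = 20480,
+ * maxpacket = 512.  The first buffer pointer covers the rest of that page
+ * (0x1000 - 0x800 = 2048 bytes) and the remaining four pointers add 4096
+ * bytes each, so count reaches 2048 + 4 * 4096 = 18432.  Since count != len
+ * it is trimmed to a multiple of maxpacket (it already is), and the last
+ * 2048 bytes are left for a following qtd.
+ */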
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+qh_update (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh, struct fusbh200_qtd *qtd)
+{
+       struct fusbh200_qh_hw *hw = qh->hw;
+
+       /* writes to an active overlay are unsafe */
+       BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+       hw->hw_qtd_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+       hw->hw_alt_next = FUSBH200_LIST_END(fusbh200);
+
+       /* Except for control endpoints, we make hardware maintain data
+        * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+        * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+        * ever clear it.
+        */
+       if (!(hw->hw_info1 & cpu_to_hc32(fusbh200, QH_TOGGLE_CTL))) {
+               unsigned        is_out, epnum;
+
+               is_out = qh->is_out;
+               epnum = (hc32_to_cpup(fusbh200, &hw->hw_info1) >> 8) & 0x0f;
+               if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
+                       hw->hw_token &= ~cpu_to_hc32(fusbh200, QTD_TOGGLE);
+                       usb_settoggle (qh->dev, epnum, is_out, 1);
+               }
+       }
+
+       hw->hw_token &= cpu_to_hc32(fusbh200, QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* if it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void
+qh_refresh (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       struct fusbh200_qtd *qtd;
+
+       if (list_empty (&qh->qtd_list))
+               qtd = qh->dummy;
+       else {
+               qtd = list_entry (qh->qtd_list.next,
+                               struct fusbh200_qtd, qtd_list);
+               /*
+                * first qtd may already be partially processed.
+                * If we come here during unlink, the QH overlay region
+                * might have a reference to the just unlinked qtd. The
+                * qtd is updated in qh_completions(). Update the QH
+                * overlay here.
+                */
+               if (cpu_to_hc32(fusbh200, qtd->qtd_dma) == qh->hw->hw_current) {
+                       qh->hw->hw_qtd_next = qtd->hw_next;
+                       qtd = NULL;
+               }
+       }
+
+       if (qtd)
+               qh_update (fusbh200, qh, qtd);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void qh_link_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+
+static void fusbh200_clear_tt_buffer_complete(struct usb_hcd *hcd,
+               struct usb_host_endpoint *ep)
+{
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200(hcd);
+       struct fusbh200_qh              *qh = ep->hcpriv;
+       unsigned long           flags;
+
+       spin_lock_irqsave(&fusbh200->lock, flags);
+       qh->clearing_tt = 0;
+       if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
+                       && fusbh200->rh_state == FUSBH200_RH_RUNNING)
+               qh_link_async(fusbh200, qh);
+       spin_unlock_irqrestore(&fusbh200->lock, flags);
+}
+
+static void fusbh200_clear_tt_buffer(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh,
+               struct urb *urb, u32 token)
+{
+
+       /* If an async split transaction gets an error or is unlinked,
+        * the TT buffer may be left in an indeterminate state.  We
+        * have to clear the TT buffer.
+        *
+        * Note: this routine is never called for Isochronous transfers.
+        */
+       if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
+#ifdef DEBUG
+               struct usb_device *tt = urb->dev->tt->hub;
+               dev_dbg(&tt->dev,
+                       "clear tt buffer port %d, a%d ep%d t%08x\n",
+                       urb->dev->ttport, urb->dev->devnum,
+                       usb_pipeendpoint(urb->pipe), token);
+#endif /* DEBUG */
+               if (urb->dev->tt->hub !=
+                   fusbh200_to_hcd(fusbh200)->self.root_hub) {
+                       if (usb_hub_clear_tt_buffer(urb) == 0)
+                               qh->clearing_tt = 1;
+               }
+       }
+}
+
+static int qtd_copy_status (
+       struct fusbh200_hcd *fusbh200,
+       struct urb *urb,
+       size_t length,
+       u32 token
+)
+{
+       int     status = -EINPROGRESS;
+
+       /* count IN/OUT bytes, not SETUP (even short packets) */
+       if (likely (QTD_PID (token) != 2))
+               urb->actual_length += length - QTD_LENGTH (token);
+
+       /* don't modify error codes */
+       if (unlikely(urb->unlinked))
+               return status;
+
+       /* force cleanup after short read; not always an error */
+       if (unlikely (IS_SHORT_READ (token)))
+               status = -EREMOTEIO;
+
+       /* serious "can't proceed" faults reported by the hardware */
+       if (token & QTD_STS_HALT) {
+               if (token & QTD_STS_BABBLE) {
+                       /* FIXME "must" disable babbling device's port too */
+                       status = -EOVERFLOW;
+               /* CERR nonzero + halt --> stall */
+               } else if (QTD_CERR(token)) {
+                       status = -EPIPE;
+
+               /* In theory, more than one of the following bits can be set
+                * since they are sticky and the transaction is retried.
+                * Which to test first is rather arbitrary.
+                */
+               } else if (token & QTD_STS_MMF) {
+                       /* fs/ls interrupt xfer missed the complete-split */
+                       status = -EPROTO;
+               } else if (token & QTD_STS_DBE) {
+                       status = (QTD_PID (token) == 1) /* IN ? */
+                               ? -ENOSR  /* hc couldn't read data */
+                               : -ECOMM; /* hc couldn't write data */
+               } else if (token & QTD_STS_XACT) {
+                       /* timeout, bad CRC, wrong PID, etc */
+                       fusbh200_dbg(fusbh200, "devpath %s ep%d%s 3strikes\n",
+                               urb->dev->devpath,
+                               usb_pipeendpoint(urb->pipe),
+                               usb_pipein(urb->pipe) ? "in" : "out");
+                       status = -EPROTO;
+               } else {        /* unknown */
+                       status = -EPROTO;
+               }
+
+               fusbh200_vdbg (fusbh200,
+                       "dev%d ep%d%s qtd token %08x --> status %d\n",
+                       usb_pipedevice (urb->pipe),
+                       usb_pipeendpoint (urb->pipe),
+                       usb_pipein (urb->pipe) ? "in" : "out",
+                       token, status);
+       }
+
+       return status;
+}
+
+static void
+fusbh200_urb_done(struct fusbh200_hcd *fusbh200, struct urb *urb, int status)
+__releases(fusbh200->lock)
+__acquires(fusbh200->lock)
+{
+       if (likely (urb->hcpriv != NULL)) {
+               struct fusbh200_qh      *qh = (struct fusbh200_qh *) urb->hcpriv;
+
+               /* S-mask in a QH means it's an interrupt urb */
+               if ((qh->hw->hw_info2 & cpu_to_hc32(fusbh200, QH_SMASK)) != 0) {
+
+                       /* ... update hc-wide periodic stats (for usbfs) */
+                       fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs--;
+               }
+       }
+
+       if (unlikely(urb->unlinked)) {
+               COUNT(fusbh200->stats.unlink);
+       } else {
+               /* report non-error and short read status as zero */
+               if (status == -EINPROGRESS || status == -EREMOTEIO)
+                       status = 0;
+               COUNT(fusbh200->stats.complete);
+       }
+
+#ifdef FUSBH200_URB_TRACE
+       fusbh200_dbg (fusbh200,
+               "%s %s urb %p ep%d%s status %d len %d/%d\n",
+               __func__, urb->dev->devpath, urb,
+               usb_pipeendpoint (urb->pipe),
+               usb_pipein (urb->pipe) ? "in" : "out",
+               status,
+               urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+       /* complete() can reenter this HCD */
+       usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+       spin_unlock (&fusbh200->lock);
+       usb_hcd_giveback_urb(fusbh200_to_hcd(fusbh200), urb, status);
+       spin_lock (&fusbh200->lock);
+}
+
+static int qh_schedule (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
+
+/*
+ * Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current.  Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned
+qh_completions (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       struct fusbh200_qtd             *last, *end = qh->dummy;
+       struct list_head        *entry, *tmp;
+       int                     last_status;
+       int                     stopped;
+       unsigned                count = 0;
+       u8                      state;
+       struct fusbh200_qh_hw   *hw = qh->hw;
+
+       if (unlikely (list_empty (&qh->qtd_list)))
+               return count;
+
+       /* completions (or tasks on other cpus) must never clobber HALT
+        * till we've gone through and cleaned everything up, even when
+        * they add urbs to this qh's queue or mark them for unlinking.
+        *
+        * NOTE:  unlinking expects to be done in queue order.
+        *
+        * It's a bug for qh->qh_state to be anything other than
+        * QH_STATE_IDLE, unless our caller is scan_async() or
+        * scan_intr().
+        */
+       state = qh->qh_state;
+       qh->qh_state = QH_STATE_COMPLETING;
+       stopped = (state == QH_STATE_IDLE);
+
+ rescan:
+       last = NULL;
+       last_status = -EINPROGRESS;
+       qh->needs_rescan = 0;
+
+       /* remove de-activated QTDs from front of queue.
+        * after faults (including short reads), cleanup this urb
+        * then let the queue advance.
+        * if queue is stopped, handles unlinks.
+        */
+       list_for_each_safe (entry, tmp, &qh->qtd_list) {
+               struct fusbh200_qtd     *qtd;
+               struct urb      *urb;
+               u32             token = 0;
+
+               qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
+               urb = qtd->urb;
+
+               /* clean up any state from previous QTD ...*/
+               if (last) {
+                       if (likely (last->urb != urb)) {
+                               fusbh200_urb_done(fusbh200, last->urb, last_status);
+                               count++;
+                               last_status = -EINPROGRESS;
+                       }
+                       fusbh200_qtd_free (fusbh200, last);
+                       last = NULL;
+               }
+
+               /* ignore urbs submitted during completions we reported */
+               if (qtd == end)
+                       break;
+
+               /* hardware copies qtd out of qh overlay */
+               rmb ();
+               token = hc32_to_cpu(fusbh200, qtd->hw_token);
+
+               /* always clean up qtds the hc de-activated */
+ retry_xacterr:
+               if ((token & QTD_STS_ACTIVE) == 0) {
+
+                       /* Report Data Buffer Error: non-fatal but useful */
+                       if (token & QTD_STS_DBE)
+                               fusbh200_dbg(fusbh200,
+                                       "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+                                       urb,
+                                       usb_endpoint_num(&urb->ep->desc),
+                                       usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
+                                       urb->transfer_buffer_length,
+                                       qtd,
+                                       qh);
+
+                       /* on STALL, error, and short reads this urb must
+                        * complete and all its qtds must be recycled.
+                        */
+                       if ((token & QTD_STS_HALT) != 0) {
+
+                               /* retry transaction errors until we
+                                * reach the software xacterr limit
+                                */
+                               if ((token & QTD_STS_XACT) &&
+                                               QTD_CERR(token) == 0 &&
+                                               ++qh->xacterrs < QH_XACTERR_MAX &&
+                                               !urb->unlinked) {
+                                       fusbh200_dbg(fusbh200,
+       "detected XactErr len %zu/%zu retry %d\n",
+       qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
+
+                                       /* reset the token in the qtd and the
+                                        * qh overlay (which still contains
+                                        * the qtd) so that we pick up from
+                                        * where we left off
+                                        */
+                                       token &= ~QTD_STS_HALT;
+                                       token |= QTD_STS_ACTIVE |
+                                                       (FUSBH200_TUNE_CERR << 10);
+                                       qtd->hw_token = cpu_to_hc32(fusbh200,
+                                                       token);
+                                       wmb();
+                                       hw->hw_token = cpu_to_hc32(fusbh200,
+                                                       token);
+                                       goto retry_xacterr;
+                               }
+                               stopped = 1;
+
+                       /* magic dummy for some short reads; qh won't advance.
+                        * that silicon quirk can kick in with this dummy too.
+                        *
+                        * other short reads won't stop the queue, including
+                        * control transfers (status stage handles that) or
+                        * most other single-qtd reads ... the queue stops if
+                        * URB_SHORT_NOT_OK was set so the driver submitting
+                        * the urbs could clean it up.
+                        */
+                       } else if (IS_SHORT_READ (token)
+                                       && !(qtd->hw_alt_next
+                                               & FUSBH200_LIST_END(fusbh200))) {
+                               stopped = 1;
+                       }
+
+               /* stop scanning when we reach qtds the hc is using */
+               } else if (likely (!stopped
+                               && fusbh200->rh_state >= FUSBH200_RH_RUNNING)) {
+                       break;
+
+               /* scan the whole queue for unlinks whenever it stops */
+               } else {
+                       stopped = 1;
+
+                       /* cancel everything if we halt, suspend, etc */
+                       if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+                               last_status = -ESHUTDOWN;
+
+                       /* this qtd is active; skip it unless a previous qtd
+                        * for its urb faulted, or its urb was canceled.
+                        */
+                       else if (last_status == -EINPROGRESS && !urb->unlinked)
+                               continue;
+
+                       /* qh unlinked; token in overlay may be most current */
+                       if (state == QH_STATE_IDLE
+                                       && cpu_to_hc32(fusbh200, qtd->qtd_dma)
+                                               == hw->hw_current) {
+                               token = hc32_to_cpu(fusbh200, hw->hw_token);
+
+                               /* An unlink may leave an incomplete
+                                * async transaction in the TT buffer.
+                                * We have to clear it.
+                                */
+                               fusbh200_clear_tt_buffer(fusbh200, qh, urb, token);
+                       }
+               }
+
+               /* unless we already know the urb's status, collect qtd status
+                * and update count of bytes transferred.  in common short read
+                * cases with only one data qtd (including control transfers),
+                * queue processing won't halt.  but with two or more qtds (for
+                * example, with a 32 KB transfer), when the first qtd gets a
+                * short read the second must be removed by hand.
+                */
+               if (last_status == -EINPROGRESS) {
+                       last_status = qtd_copy_status(fusbh200, urb,
+                                       qtd->length, token);
+                       if (last_status == -EREMOTEIO
+                                       && (qtd->hw_alt_next
+                                               & FUSBH200_LIST_END(fusbh200)))
+                               last_status = -EINPROGRESS;
+
+                       /* As part of low/full-speed endpoint-halt processing
+                        * we must clear the TT buffer (11.17.5).
+                        */
+                       if (unlikely(last_status != -EINPROGRESS &&
+                                       last_status != -EREMOTEIO)) {
+                               /* The TTs in some hubs malfunction when they
+                                * receive this request following a STALL (they
+                                * stop sending isochronous packets).  Since a
+                                * STALL can't leave the TT buffer in a busy
+                                * state (if you believe Figures 11-48 - 11-51
+                                * in the USB 2.0 spec), we won't clear the TT
+                                * buffer in this case.  Strictly speaking this
+                                * is a violation of the spec.
+                                */
+                               if (last_status != -EPIPE)
+                                       fusbh200_clear_tt_buffer(fusbh200, qh, urb,
+                                                       token);
+                       }
+               }
+
+               /* if we're removing something not at the queue head,
+                * patch the hardware queue pointer.
+                */
+               if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+                       last = list_entry (qtd->qtd_list.prev,
+                                       struct fusbh200_qtd, qtd_list);
+                       last->hw_next = qtd->hw_next;
+               }
+
+               /* remove qtd; it's recycled after possible urb completion */
+               list_del (&qtd->qtd_list);
+               last = qtd;
+
+               /* reinit the xacterr counter for the next qtd */
+               qh->xacterrs = 0;
+       }
+
+       /* last urb's completion might still need calling */
+       if (likely (last != NULL)) {
+               fusbh200_urb_done(fusbh200, last->urb, last_status);
+               count++;
+               fusbh200_qtd_free (fusbh200, last);
+       }
+
+       /* Do we need to rescan for URBs dequeued during a giveback? */
+       if (unlikely(qh->needs_rescan)) {
+               /* If the QH is already unlinked, do the rescan now. */
+               if (state == QH_STATE_IDLE)
+                       goto rescan;
+
+               /* Otherwise we have to wait until the QH is fully unlinked.
+                * Our caller will start an unlink if qh->needs_rescan is
+                * set.  But if an unlink has already started, nothing needs
+                * to be done.
+                */
+               if (state != QH_STATE_LINKED)
+                       qh->needs_rescan = 0;
+       }
+
+       /* restore original state; caller must unlink or relink */
+       qh->qh_state = state;
+
+       /* be sure the hardware's done with the qh before refreshing
+        * it after fault cleanup, or recovering from silicon wrongly
+        * overlaying the dummy qtd (which reduces DMA chatter).
+        */
+       if (stopped != 0 || hw->hw_qtd_next == FUSBH200_LIST_END(fusbh200)) {
+               switch (state) {
+               case QH_STATE_IDLE:
+                       qh_refresh(fusbh200, qh);
+                       break;
+               case QH_STATE_LINKED:
+                       /* We won't refresh a QH that's linked (after the HC
+                        * stopped the queue).  That avoids a race:
+                        *  - HC reads first part of QH;
+                        *  - CPU updates that first part and the token;
+                        *  - HC reads rest of that QH, including token
+                        * Result:  HC gets an inconsistent image, and then
+                        * DMAs to/from the wrong memory (corrupting it).
+                        *
+                        * That should be rare for interrupt transfers,
+                        * except maybe high bandwidth ...
+                        */
+
+                       /* Tell the caller to start an unlink */
+                       qh->needs_rescan = 1;
+                       break;
+               /* otherwise, unlink already started */
+               }
+       }
+
+       return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+// ... and packet size, for any kind of endpoint descriptor
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
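+// e.g. wMaxPacketSize 0x1400 decodes as hb_mult = 3 transactions per
+// microframe of max_packet = 1024 bytes each.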
+
+/*
+ * reverse of qh_urb_transaction:  free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free (
+       struct fusbh200_hcd             *fusbh200,
+       struct urb              *urb,
+       struct list_head        *qtd_list
+) {
+       struct list_head        *entry, *temp;
+
+       list_for_each_safe (entry, temp, qtd_list) {
+               struct fusbh200_qtd     *qtd;
+
+               qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
+               list_del (&qtd->qtd_list);
+               fusbh200_qtd_free (fusbh200, qtd);
+       }
+}
+
+/*
+ * create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *
+qh_urb_transaction (
+       struct fusbh200_hcd             *fusbh200,
+       struct urb              *urb,
+       struct list_head        *head,
+       gfp_t                   flags
+) {
+       struct fusbh200_qtd             *qtd, *qtd_prev;
+       dma_addr_t              buf;
+       int                     len, this_sg_len, maxpacket;
+       int                     is_input;
+       u32                     token;
+       int                     i;
+       struct scatterlist      *sg;
+
+       /*
+        * URBs map to sequences of QTDs:  one logical transaction
+        */
+       qtd = fusbh200_qtd_alloc (fusbh200, flags);
+       if (unlikely (!qtd))
+               return NULL;
+       list_add_tail (&qtd->qtd_list, head);
+       qtd->urb = urb;
+
+       token = QTD_STS_ACTIVE;
+       token |= (FUSBH200_TUNE_CERR << 10);
+       /* for split transactions, SplitXState initialized to zero */
+
+       len = urb->transfer_buffer_length;
+       is_input = usb_pipein (urb->pipe);
+       if (usb_pipecontrol (urb->pipe)) {
+               /* SETUP pid */
+               qtd_fill(fusbh200, qtd, urb->setup_dma,
+                               sizeof (struct usb_ctrlrequest),
+                               token | (2 /* "setup" */ << 8), 8);
+
+               /* ... and always at least one more pid */
+               token ^= QTD_TOGGLE;
+               qtd_prev = qtd;
+               qtd = fusbh200_qtd_alloc (fusbh200, flags);
+               if (unlikely (!qtd))
+                       goto cleanup;
+               qtd->urb = urb;
+               qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+               list_add_tail (&qtd->qtd_list, head);
+
+               /* for zero length DATA stages, STATUS is always IN */
+               if (len == 0)
+                       token |= (1 /* "in" */ << 8);
+       }
+
+       /*
+        * data transfer stage:  buffer setup
+        */
+       i = urb->num_mapped_sgs;
+       if (len > 0 && i > 0) {
+               sg = urb->sg;
+               buf = sg_dma_address(sg);
+
+               /* urb->transfer_buffer_length may be smaller than the
+                * size of the scatterlist (or vice versa)
+                */
+               this_sg_len = min_t(int, sg_dma_len(sg), len);
+       } else {
+               sg = NULL;
+               buf = urb->transfer_dma;
+               this_sg_len = len;
+       }
+
+       if (is_input)
+               token |= (1 /* "in" */ << 8);
+       /* else it's already initted to "out" pid (0 << 8) */
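+       /* (qtd token PID codes, per EHCI: 0 = OUT, 1 = IN, 2 = SETUP) */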
+
+       maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+       /*
+        * buffer gets wrapped in one or more qtds;
+        * last one may be "short" (including zero len)
+        * and may serve as a control status ack
+        */
+       for (;;) {
+               int this_qtd_len;
+
+               this_qtd_len = qtd_fill(fusbh200, qtd, buf, this_sg_len, token,
+                               maxpacket);
+               this_sg_len -= this_qtd_len;
+               len -= this_qtd_len;
+               buf += this_qtd_len;
+
+               /*
+                * short reads advance to a "magic" dummy instead of the next
+                * qtd ... that forces the queue to stop, for manual cleanup.
+                * (this will usually be overridden later.)
+                */
+               if (is_input)
+                       qtd->hw_alt_next = fusbh200->async->hw->hw_alt_next;
+
+               /* qh makes control packets use qtd toggle; maybe switch it */
+               if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+                       token ^= QTD_TOGGLE;
+
+               if (likely(this_sg_len <= 0)) {
+                       if (--i <= 0 || len <= 0)
+                               break;
+                       sg = sg_next(sg);
+                       buf = sg_dma_address(sg);
+                       this_sg_len = min_t(int, sg_dma_len(sg), len);
+               }
+
+               qtd_prev = qtd;
+               qtd = fusbh200_qtd_alloc (fusbh200, flags);
+               if (unlikely (!qtd))
+                       goto cleanup;
+               qtd->urb = urb;
+               qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+               list_add_tail (&qtd->qtd_list, head);
+       }
+
+       /*
+        * unless the caller requires manual cleanup after short reads,
+        * have the alt_next mechanism keep the queue running after the
+        * last data qtd (the only one, for control and most other cases).
+        */
+       if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+                               || usb_pipecontrol (urb->pipe)))
+               qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
+
+       /*
+        * control requests may need a terminating data "status" ack;
+        * other OUT ones may need a terminating short packet
+        * (zero length).
+        */
+       if (likely (urb->transfer_buffer_length != 0)) {
+               int     one_more = 0;
+
+               if (usb_pipecontrol (urb->pipe)) {
+                       one_more = 1;
+                       token ^= 0x0100;        /* "in" <--> "out"  */
+                       token |= QTD_TOGGLE;    /* force DATA1 */
+               } else if (usb_pipeout(urb->pipe)
+                               && (urb->transfer_flags & URB_ZERO_PACKET)
+                               && !(urb->transfer_buffer_length % maxpacket)) {
+                       one_more = 1;
+               }
+               if (one_more) {
+                       qtd_prev = qtd;
+                       qtd = fusbh200_qtd_alloc (fusbh200, flags);
+                       if (unlikely (!qtd))
+                               goto cleanup;
+                       qtd->urb = urb;
+                       qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
+                       list_add_tail (&qtd->qtd_list, head);
+
+                       /* never any data in such packets */
+                       qtd_fill(fusbh200, qtd, 0, 0, token, 0);
+               }
+       }
+
+       /* by default, enable interrupt on urb completion */
+       if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
+               qtd->hw_token |= cpu_to_hc32(fusbh200, QTD_IOC);
+       return head;
+
+cleanup:
+       qtd_list_free (fusbh200, urb, head);
+       return NULL;
+}
+
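+/* For a typical control transfer, the chain built above looks roughly like:
+ *
+ *   SETUP qtd (8 bytes, PID SETUP)
+ *     -> DATA qtd(s) (direction from the pipe, toggles starting at DATA1)
+ *     -> STATUS qtd (zero length, opposite direction, forced DATA1)
+ *
+ * linked through hw_next, with QTD_IOC set on the last qtd unless
+ * URB_NO_INTERRUPT was requested.
+ */
+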
+/*-------------------------------------------------------------------------*/
+
+// Would be best to create all qh's from config descriptors,
+// when each interface/altsetting is established.  Unlink
+// any previous qh and cancel its urbs first; endpoints are
+// implicitly reset then (data toggle too).
+// That'd mean updating how usbcore talks to HCDs. (2.7?)
+
+
+/*
+ * Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled.  For highspeed, that's
+ * just one microframe in the s-mask.  For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
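+
+/* Example (illustrative only): a high-speed interrupt endpoint with
+ * bInterval 4 is typically submitted with urb->interval = 2^(4-1) = 8
+ * microframes (usb_fill_int_urb() does this conversion), which qh_make()
+ * below turns into qh->period = 8 >> 3 = 1 frame.
+ */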
+static struct fusbh200_qh *
+qh_make (
+       struct fusbh200_hcd             *fusbh200,
+       struct urb              *urb,
+       gfp_t                   flags
+) {
+       struct fusbh200_qh              *qh = fusbh200_qh_alloc (fusbh200, flags);
+       u32                     info1 = 0, info2 = 0;
+       int                     is_input, type;
+       int                     maxp = 0;
+       struct usb_tt           *tt = urb->dev->tt;
+       struct fusbh200_qh_hw   *hw;
+
+       if (!qh)
+               return qh;
+
+       /*
+        * init endpoint/device data for this QH
+        */
+       info1 |= usb_pipeendpoint (urb->pipe) << 8;
+       info1 |= usb_pipedevice (urb->pipe) << 0;
+
+       is_input = usb_pipein (urb->pipe);
+       type = usb_pipetype (urb->pipe);
+       maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
+
+       /* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
+        * acts like up to 3KB, but is built from smaller packets.
+        */
+       if (max_packet(maxp) > 1024) {
+               fusbh200_dbg(fusbh200, "bogus qh maxpacket %d\n", max_packet(maxp));
+               goto done;
+       }
+
+       /* Compute interrupt scheduling parameters just once, and save.
+        * - allowing for high bandwidth, how many nsec/uframe are used?
+        * - split transactions need a second CSPLIT uframe; same question
+        * - splits also need a schedule gap (for full/low speed I/O)
+        * - qh has a polling interval
+        *
+        * For control/bulk requests, the HC or TT handles these.
+        */
+       if (type == PIPE_INTERRUPT) {
+               qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+                               is_input, 0,
+                               hb_mult(maxp) * max_packet(maxp)));
+               qh->start = NO_FRAME;
+
+               if (urb->dev->speed == USB_SPEED_HIGH) {
+                       qh->c_usecs = 0;
+                       qh->gap_uf = 0;
+
+                       qh->period = urb->interval >> 3;
+                       if (qh->period == 0 && urb->interval != 1) {
+                               /* NOTE interval 2 or 4 uframes could work.
+                                * But interval 1 scheduling is simpler, and
+                                * includes high bandwidth.
+                                */
+                               urb->interval = 1;
+                       } else if (qh->period > fusbh200->periodic_size) {
+                               qh->period = fusbh200->periodic_size;
+                               urb->interval = qh->period << 3;
+                       }
+               } else {
+                       int             think_time;
+
+                       /* gap is f(FS/LS transfer times) */
+                       qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
+                                       is_input, 0, maxp) / (125 * 1000);
+
+                       /* FIXME this just approximates SPLIT/CSPLIT times */
+                       if (is_input) {         // SPLIT, gap, CSPLIT+DATA
+                               qh->c_usecs = qh->usecs + HS_USECS (0);
+                               qh->usecs = HS_USECS (1);
+                       } else {                // SPLIT+DATA, gap, CSPLIT
+                               qh->usecs += HS_USECS (1);
+                               qh->c_usecs = HS_USECS (0);
+                       }
+
+                       think_time = tt ? tt->think_time : 0;
+                       qh->tt_usecs = NS_TO_US (think_time +
+                                       usb_calc_bus_time (urb->dev->speed,
+                                       is_input, 0, max_packet (maxp)));
+                       qh->period = urb->interval;
+                       if (qh->period > fusbh200->periodic_size) {
+                               qh->period = fusbh200->periodic_size;
+                               urb->interval = qh->period;
+                       }
+               }
+       }
+
+       /* support for tt scheduling, and access to toggles */
+       qh->dev = urb->dev;
+
+       /* using TT? */
+       switch (urb->dev->speed) {
+       case USB_SPEED_LOW:
+               info1 |= QH_LOW_SPEED;
+               /* FALL THROUGH */
+
+       case USB_SPEED_FULL:
+               /* EPS 0 means "full" */
+               if (type != PIPE_INTERRUPT)
+                       info1 |= (FUSBH200_TUNE_RL_TT << 28);
+               if (type == PIPE_CONTROL) {
+                       info1 |= QH_CONTROL_EP;         /* for TT */
+                       info1 |= QH_TOGGLE_CTL;         /* toggle from qtd */
+               }
+               info1 |= maxp << 16;
+
+               info2 |= (FUSBH200_TUNE_MULT_TT << 30);
+
+               /* Some Freescale processors have an erratum in which the
+                * port number in the queue head was 0..N-1 instead of 1..N.
+                */
+               if (fusbh200_has_fsl_portno_bug(fusbh200))
+                       info2 |= (urb->dev->ttport-1) << 23;
+               else
+                       info2 |= urb->dev->ttport << 23;
+
+               /* set the address of the TT; for TDI's integrated
+                * root hub tt, leave it zeroed.
+                */
+               if (tt && tt->hub != fusbh200_to_hcd(fusbh200)->self.root_hub)
+                       info2 |= tt->hub->devnum << 16;
+
+               /* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+               break;
+
+       case USB_SPEED_HIGH:            /* no TT involved */
+               info1 |= QH_HIGH_SPEED;
+               if (type == PIPE_CONTROL) {
+                       info1 |= (FUSBH200_TUNE_RL_HS << 28);
+                       info1 |= 64 << 16;      /* usb2 fixed maxpacket */
+                       info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+                       info2 |= (FUSBH200_TUNE_MULT_HS << 30);
+               } else if (type == PIPE_BULK) {
+                       info1 |= (FUSBH200_TUNE_RL_HS << 28);
+                       /* The USB spec says that high speed bulk endpoints
+                        * always use 512 byte maxpacket.  But some device
+                        * vendors decided to ignore that, and MSFT is happy
+                        * to help them do so.  So now people expect to use
+                        * such nonconformant devices with Linux too; sigh.
+                        */
+                       info1 |= max_packet(maxp) << 16;
+                       info2 |= (FUSBH200_TUNE_MULT_HS << 30);
+               } else {                /* PIPE_INTERRUPT */
+                       info1 |= max_packet (maxp) << 16;
+                       info2 |= hb_mult (maxp) << 30;
+               }
+               break;
+       default:
+               fusbh200_dbg(fusbh200, "bogus dev %p speed %d\n", urb->dev,
+                       urb->dev->speed);
+done:
+               qh_destroy(fusbh200, qh);
+               return NULL;
+       }
+
+       /* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+       /* init as live, toggle clear, advance to dummy */
+       qh->qh_state = QH_STATE_IDLE;
+       hw = qh->hw;
+       hw->hw_info1 = cpu_to_hc32(fusbh200, info1);
+       hw->hw_info2 = cpu_to_hc32(fusbh200, info2);
+       qh->is_out = !is_input;
+       usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
+       qh_refresh (fusbh200, qh);
+       return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
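+/* The async schedule is reference counted: enable_async()/disable_async()
+ * only act on the 0 <-> 1 transitions of async_count, so the schedule stays
+ * on for as long as any async QH is linked.
+ */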
+static void enable_async(struct fusbh200_hcd *fusbh200)
+{
+       if (fusbh200->async_count++)
+               return;
+
+       /* Stop waiting to turn off the async schedule */
+       fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_ASYNC);
+
+       /* Don't start the schedule until ASS is 0 */
+       fusbh200_poll_ASS(fusbh200);
+       turn_on_io_watchdog(fusbh200);
+}
+
+static void disable_async(struct fusbh200_hcd *fusbh200)
+{
+       if (--fusbh200->async_count)
+               return;
+
+       /* The async schedule and async_unlink list are supposed to be empty */
+       WARN_ON(fusbh200->async->qh_next.qh || fusbh200->async_unlink);
+
+       /* Don't turn off the schedule until ASS is 1 */
+       fusbh200_poll_ASS(fusbh200);
+}
+
+/* move qh (and its qtds) onto async queue; maybe enable queue.  */
+
+static void qh_link_async (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       __hc32          dma = QH_NEXT(fusbh200, qh->qh_dma);
+       struct fusbh200_qh      *head;
+
+       /* Don't link a QH if there's a Clear-TT-Buffer pending */
+       if (unlikely(qh->clearing_tt))
+               return;
+
+       WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
+       /* clear halt and/or toggle; and maybe recover from silicon quirk */
+       qh_refresh(fusbh200, qh);
+
+       /* splice right after start */
+       head = fusbh200->async;
+       qh->qh_next = head->qh_next;
+       qh->hw->hw_next = head->hw->hw_next;
+       wmb ();
+
+       head->qh_next.qh = qh;
+       head->hw->hw_next = dma;
+
+       qh->xacterrs = 0;
+       qh->qh_state = QH_STATE_LINKED;
+       /* qtd completions reported later by interrupt */
+
+       enable_async(fusbh200);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns NULL if it can't allocate the QH it needs.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct fusbh200_qh *qh_append_tds (
+       struct fusbh200_hcd             *fusbh200,
+       struct urb              *urb,
+       struct list_head        *qtd_list,
+       int                     epnum,
+       void                    **ptr
+)
+{
+       struct fusbh200_qh              *qh = NULL;
+       __hc32                  qh_addr_mask = cpu_to_hc32(fusbh200, 0x7f);
+
+       qh = (struct fusbh200_qh *) *ptr;
+       if (unlikely (qh == NULL)) {
+               /* can't sleep here, we have fusbh200->lock... */
+               qh = qh_make (fusbh200, urb, GFP_ATOMIC);
+               *ptr = qh;
+       }
+       if (likely (qh != NULL)) {
+               struct fusbh200_qtd     *qtd;
+
+               if (unlikely (list_empty (qtd_list)))
+                       qtd = NULL;
+               else
+                       qtd = list_entry (qtd_list->next, struct fusbh200_qtd,
+                                       qtd_list);
+
+               /* control qh may need patching ... */
+               if (unlikely (epnum == 0)) {
+
+                       /* usb_reset_device() briefly reverts to address 0 */
+                       if (usb_pipedevice (urb->pipe) == 0)
+                               qh->hw->hw_info1 &= ~qh_addr_mask;
+               }
+
+               /* just one way to queue requests: swap with the dummy qtd.
+                * only hc or qh_refresh() ever modify the overlay.
+                */
+               if (likely (qtd != NULL)) {
+                       struct fusbh200_qtd             *dummy;
+                       dma_addr_t              dma;
+                       __hc32                  token;
+
+                       /* to avoid racing the HC, use the dummy td instead of
+                        * the first td of our list (becomes new dummy).  both
+                        * tds stay deactivated until we're done, when the
+                        * HC is allowed to fetch the old dummy (4.10.2).
+                        */
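+                       /* (Step by step: the old dummy takes over the body
+                        * of the first new qtd, that qtd is re-initialised
+                        * as the fresh dummy at the tail of the list, and
+                        * only then is the saved token written back, which
+                        * activates the whole chain in one go.)
+                        */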
+                       token = qtd->hw_token;
+                       qtd->hw_token = HALT_BIT(fusbh200);
+
+                       dummy = qh->dummy;
+
+                       dma = dummy->qtd_dma;
+                       *dummy = *qtd;
+                       dummy->qtd_dma = dma;
+
+                       list_del (&qtd->qtd_list);
+                       list_add (&dummy->qtd_list, qtd_list);
+                       list_splice_tail(qtd_list, &qh->qtd_list);
+
+                       fusbh200_qtd_init(fusbh200, qtd, qtd->qtd_dma);
+                       qh->dummy = qtd;
+
+                       /* hc must see the new dummy at list end */
+                       dma = qtd->qtd_dma;
+                       qtd = list_entry (qh->qtd_list.prev,
+                                       struct fusbh200_qtd, qtd_list);
+                       qtd->hw_next = QTD_NEXT(fusbh200, dma);
+
+                       /* let the hc process these next qtds */
+                       wmb ();
+                       dummy->hw_token = token;
+
+                       urb->hcpriv = qh;
+               }
+       }
+       return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+submit_async (
+       struct fusbh200_hcd             *fusbh200,
+       struct urb              *urb,
+       struct list_head        *qtd_list,
+       gfp_t                   mem_flags
+) {
+       int                     epnum;
+       unsigned long           flags;
+       struct fusbh200_qh              *qh = NULL;
+       int                     rc;
+
+       epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef FUSBH200_URB_TRACE
+       {
+               struct fusbh200_qtd *qtd;
+               qtd = list_entry(qtd_list->next, struct fusbh200_qtd, qtd_list);
+               fusbh200_dbg(fusbh200,
+                        "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+                        __func__, urb->dev->devpath, urb,
+                        epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+                        urb->transfer_buffer_length,
+                        qtd, urb->ep->hcpriv);
+       }
+#endif
+
+       spin_lock_irqsave (&fusbh200->lock, flags);
+       if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
+               rc = -ESHUTDOWN;
+               goto done;
+       }
+       rc = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
+       if (unlikely(rc))
+               goto done;
+
+       qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
+       if (unlikely(qh == NULL)) {
+               usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+               rc = -ENOMEM;
+               goto done;
+       }
+
+       /* Control/bulk operations through TTs don't need scheduling,
+        * the HC and TT handle it when the TT has a buffer ready.
+        */
+       if (likely (qh->qh_state == QH_STATE_IDLE))
+               qh_link_async(fusbh200, qh);
+ done:
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+       if (unlikely (qh == NULL))
+               qtd_list_free (fusbh200, urb, qtd_list);
+       return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void single_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       struct fusbh200_qh              *prev;
+
+       /* Add to the end of the list of QHs waiting for the next IAAD */
+       qh->qh_state = QH_STATE_UNLINK;
+       if (fusbh200->async_unlink)
+               fusbh200->async_unlink_last->unlink_next = qh;
+       else
+               fusbh200->async_unlink = qh;
+       fusbh200->async_unlink_last = qh;
+
+       /* Unlink it from the schedule */
+       prev = fusbh200->async;
+       while (prev->qh_next.qh != qh)
+               prev = prev->qh_next.qh;
+
+       prev->hw->hw_next = qh->hw->hw_next;
+       prev->qh_next = qh->qh_next;
+       if (fusbh200->qh_scan_next == qh)
+               fusbh200->qh_scan_next = qh->qh_next.qh;
+}
+
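+/* IAA ("Interrupt on Async Advance") handshake: after QHs are unlinked from
+ * the schedule above, the controller may still hold cached copies.  Setting
+ * CMD_IAAD asks it to raise STS_IAA once it has advanced past them, at which
+ * point end_unlink_async() finishes the job; the hrtimer event armed below
+ * is a watchdog in case that interrupt is lost.
+ */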
+static void start_iaa_cycle(struct fusbh200_hcd *fusbh200, bool nested)
+{
+       /*
+        * Do nothing if an IAA cycle is already running or
+        * if one will be started shortly.
+        */
+       if (fusbh200->async_iaa || fusbh200->async_unlinking)
+               return;
+
+       /* Do all the waiting QHs at once */
+       fusbh200->async_iaa = fusbh200->async_unlink;
+       fusbh200->async_unlink = NULL;
+
+       /* If the controller isn't running, we don't have to wait for it */
+       if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING)) {
+               if (!nested)            /* Avoid recursion */
+                       end_unlink_async(fusbh200);
+
+       /* Otherwise start a new IAA cycle */
+       } else if (likely(fusbh200->rh_state == FUSBH200_RH_RUNNING)) {
+               /* Make sure the unlinks are all visible to the hardware */
+               wmb();
+
+               fusbh200_writel(fusbh200, fusbh200->command | CMD_IAAD,
+                               &fusbh200->regs->command);
+               fusbh200_readl(fusbh200, &fusbh200->regs->command);
+               fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IAA_WATCHDOG, true);
+       }
+}
+
+/* the async qhs for the qtds being unlinked are now gone from the HC */
+
+static void end_unlink_async(struct fusbh200_hcd *fusbh200)
+{
+       struct fusbh200_qh              *qh;
+
+       /* Process the idle QHs */
+ restart:
+       fusbh200->async_unlinking = true;
+       while (fusbh200->async_iaa) {
+               qh = fusbh200->async_iaa;
+               fusbh200->async_iaa = qh->unlink_next;
+               qh->unlink_next = NULL;
+
+               qh->qh_state = QH_STATE_IDLE;
+               qh->qh_next.qh = NULL;
+
+               qh_completions(fusbh200, qh);
+               if (!list_empty(&qh->qtd_list) &&
+                               fusbh200->rh_state == FUSBH200_RH_RUNNING)
+                       qh_link_async(fusbh200, qh);
+               disable_async(fusbh200);
+       }
+       fusbh200->async_unlinking = false;
+
+       /* Start a new IAA cycle if any QHs are waiting for it */
+       if (fusbh200->async_unlink) {
+               start_iaa_cycle(fusbh200, true);
+               if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING))
+                       goto restart;
+       }
+}
+
+static void unlink_empty_async(struct fusbh200_hcd *fusbh200)
+{
+       struct fusbh200_qh              *qh, *next;
+       bool                    stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
+       bool                    check_unlinks_later = false;
+
+       /* Unlink all the async QHs that have been empty for a timer cycle */
+       next = fusbh200->async->qh_next.qh;
+       while (next) {
+               qh = next;
+               next = qh->qh_next.qh;
+
+               if (list_empty(&qh->qtd_list) &&
+                               qh->qh_state == QH_STATE_LINKED) {
+                       if (!stopped && qh->unlink_cycle ==
+                                       fusbh200->async_unlink_cycle)
+                               check_unlinks_later = true;
+                       else
+                               single_unlink_async(fusbh200, qh);
+               }
+       }
+
+       /* Start a new IAA cycle if any QHs are waiting for it */
+       if (fusbh200->async_unlink)
+               start_iaa_cycle(fusbh200, false);
+
+       /* QHs that haven't been empty for long enough will be handled later */
+       if (check_unlinks_later) {
+               fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
+               ++fusbh200->async_unlink_cycle;
+       }
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own fusbh200->lock */
+
+static void start_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       /*
+        * If the QH isn't linked then there's nothing we can do
+        * unless we were called during a giveback, in which case
+        * qh_completions() has to deal with it.
+        */
+       if (qh->qh_state != QH_STATE_LINKED) {
+               if (qh->qh_state == QH_STATE_COMPLETING)
+                       qh->needs_rescan = 1;
+               return;
+       }
+
+       single_unlink_async(fusbh200, qh);
+       start_iaa_cycle(fusbh200, false);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_async (struct fusbh200_hcd *fusbh200)
+{
+       struct fusbh200_qh              *qh;
+       bool                    check_unlinks_later = false;
+
+       fusbh200->qh_scan_next = fusbh200->async->qh_next.qh;
+       while (fusbh200->qh_scan_next) {
+               qh = fusbh200->qh_scan_next;
+               fusbh200->qh_scan_next = qh->qh_next.qh;
+ rescan:
+               /* clean any finished work for this qh */
+               if (!list_empty(&qh->qtd_list)) {
+                       int temp;
+
+                       /*
+                        * Unlinks could happen here; completion reporting
+                        * drops the lock.  That's why fusbh200->qh_scan_next
+                        * always holds the next qh to scan; if the next qh
+                        * gets unlinked then fusbh200->qh_scan_next is adjusted
+                        * in single_unlink_async().
+                        */
+                       temp = qh_completions(fusbh200, qh);
+                       if (qh->needs_rescan) {
+                               start_unlink_async(fusbh200, qh);
+                       } else if (list_empty(&qh->qtd_list)
+                                       && qh->qh_state == QH_STATE_LINKED) {
+                               qh->unlink_cycle = fusbh200->async_unlink_cycle;
+                               check_unlinks_later = true;
+                       } else if (temp != 0)
+                               goto rescan;
+               }
+       }
+
+       /*
+        * Unlink empty entries, reducing DMA usage as well
+        * as HCD schedule-scanning costs.  Delay for any qh
+        * we just scanned; it's not unusual for such a qh to
+        * become busy again soon.
+        */
+       if (check_unlinks_later && fusbh200->rh_state == FUSBH200_RH_RUNNING &&
+                       !(fusbh200->enabled_hrtimer_events &
+                               BIT(FUSBH200_HRTIMER_ASYNC_UNLINKS))) {
+               fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
+               ++fusbh200->async_unlink_cycle;
+       }
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI scheduled transaction support:  interrupt, iso, split iso
+ * These are called "periodic" transactions in the EHCI spec.
+ *
+ * Note that for interrupt transfers, the QH/QTD manipulation is shared
+ * with the "asynchronous" transaction support (control/bulk transfers).
+ * The only real difference is in how interrupt transfers are scheduled.
+ *
+ * For ISO, we make an "iso_stream" head to serve the same role as a QH.
+ * It keeps track of every ITD (or SITD) that's linked, and holds enough
+ * pre-calculated schedule data to make appending to the queue be quick.
+ */
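+/*
+ * The periodic schedule itself is fusbh200->periodic[], a frame-indexed
+ * array of hardware "next" pointers walked by the controller, mirrored by
+ * fusbh200->pshadow[], a parallel array of software (shadow) pointers the
+ * driver uses to traverse the same per-frame lists.
+ */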
+
+static int fusbh200_get_frame (struct usb_hcd *hcd);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd
+ * @tag: hardware tag for type of this record
+ */
+static union fusbh200_shadow *
+periodic_next_shadow(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
+               __hc32 tag)
+{
+       switch (hc32_to_cpu(fusbh200, tag)) {
+       case Q_TYPE_QH:
+               return &periodic->qh->qh_next;
+       case Q_TYPE_FSTN:
+               return &periodic->fstn->fstn_next;
+       default:
+               return &periodic->itd->itd_next;
+       }
+}
+
+static __hc32 *
+shadow_next_periodic(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
+               __hc32 tag)
+{
+       switch (hc32_to_cpu(fusbh200, tag)) {
+       /* our fusbh200_shadow.qh is actually the software part */
+       case Q_TYPE_QH:
+               return &periodic->qh->hw->hw_next;
+       /* others are hw parts */
+       default:
+               return periodic->hw_next;
+       }
+}
+
+/* caller must hold fusbh200->lock */
+static void periodic_unlink (struct fusbh200_hcd *fusbh200, unsigned frame, void *ptr)
+{
+       union fusbh200_shadow   *prev_p = &fusbh200->pshadow[frame];
+       __hc32                  *hw_p = &fusbh200->periodic[frame];
+       union fusbh200_shadow   here = *prev_p;
+
+       /* find predecessor of "ptr"; hw and shadow lists are in sync */
+       while (here.ptr && here.ptr != ptr) {
+               prev_p = periodic_next_shadow(fusbh200, prev_p,
+                               Q_NEXT_TYPE(fusbh200, *hw_p));
+               hw_p = shadow_next_periodic(fusbh200, &here,
+                               Q_NEXT_TYPE(fusbh200, *hw_p));
+               here = *prev_p;
+       }
+       /* an interrupt entry (at list end) could have been shared */
+       if (!here.ptr)
+               return;
+
+       /* update shadow and hardware lists ... the old "next" pointers
+        * from ptr may still be in use, the caller updates them.
+        */
+       *prev_p = *periodic_next_shadow(fusbh200, &here,
+                       Q_NEXT_TYPE(fusbh200, *hw_p));
+
+       *hw_p = *shadow_next_periodic(fusbh200, &here,
+                               Q_NEXT_TYPE(fusbh200, *hw_p));
+}
+
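+/* Periodic bandwidth is accounted in usecs per 125-usec microframe; the
+ * scheduler refuses to commit more than fusbh200->uframe_periodic_max of
+ * those usecs to any single microframe.
+ */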
+/* how many of the uframe's 125 usecs are allocated? */
+static unsigned short
+periodic_usecs (struct fusbh200_hcd *fusbh200, unsigned frame, unsigned uframe)
+{
+       __hc32                  *hw_p = &fusbh200->periodic [frame];
+       union fusbh200_shadow   *q = &fusbh200->pshadow [frame];
+       unsigned                usecs = 0;
+       struct fusbh200_qh_hw   *hw;
+
+       while (q->ptr) {
+               switch (hc32_to_cpu(fusbh200, Q_NEXT_TYPE(fusbh200, *hw_p))) {
+               case Q_TYPE_QH:
+                       hw = q->qh->hw;
+                       /* is it in the S-mask? */
+                       if (hw->hw_info2 & cpu_to_hc32(fusbh200, 1 << uframe))
+                               usecs += q->qh->usecs;
+                       /* ... or C-mask? */
+                       if (hw->hw_info2 & cpu_to_hc32(fusbh200,
+                                       1 << (8 + uframe)))
+                               usecs += q->qh->c_usecs;
+                       hw_p = &hw->hw_next;
+                       q = &q->qh->qh_next;
+                       break;
+               // case Q_TYPE_FSTN:
+               default:
+                       /* for "save place" FSTNs, count the relevant INTR
+                        * bandwidth from the previous frame
+                        */
+                       if (q->fstn->hw_prev != FUSBH200_LIST_END(fusbh200)) {
+                               fusbh200_dbg (fusbh200, "ignoring FSTN cost ...\n");
+                       }
+                       hw_p = &q->fstn->hw_next;
+                       q = &q->fstn->fstn_next;
+                       break;
+               case Q_TYPE_ITD:
+                       if (q->itd->hw_transaction[uframe])
+                               usecs += q->itd->stream->usecs;
+                       hw_p = &q->itd->hw_next;
+                       q = &q->itd->itd_next;
+                       break;
+               }
+       }
+#ifdef DEBUG
+       if (usecs > fusbh200->uframe_periodic_max)
+               fusbh200_err (fusbh200, "uframe %d sched overrun: %d usecs\n",
+                       frame * 8 + uframe, usecs);
+#endif
+       return usecs;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
+{
+       if (!dev1->tt || !dev2->tt)
+               return 0;
+       if (dev1->tt != dev2->tt)
+               return 0;
+       if (dev1->tt->multi)
+               return dev1->ttport == dev2->ttport;
+       else
+               return 1;
+}
+
+/* return true iff the device's transaction translator is available
+ * for a periodic transfer starting at the specified frame, using
+ * all the uframes in the mask.
+ */
+static int tt_no_collision (
+       struct fusbh200_hcd             *fusbh200,
+       unsigned                period,
+       struct usb_device       *dev,
+       unsigned                frame,
+       u32                     uf_mask
+)
+{
+       if (period == 0)        /* error */
+               return 0;
+
+       /* note bandwidth wastage:  split never follows csplit
+        * (different dev or endpoint) until the next uframe.
+        * calling convention doesn't make that distinction.
+        */
+       for (; frame < fusbh200->periodic_size; frame += period) {
+               union fusbh200_shadow   here;
+               __hc32                  type;
+               struct fusbh200_qh_hw   *hw;
+
+               here = fusbh200->pshadow [frame];
+               type = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [frame]);
+               while (here.ptr) {
+                       switch (hc32_to_cpu(fusbh200, type)) {
+                       case Q_TYPE_ITD:
+                               type = Q_NEXT_TYPE(fusbh200, here.itd->hw_next);
+                               here = here.itd->itd_next;
+                               continue;
+                       case Q_TYPE_QH:
+                               hw = here.qh->hw;
+                               if (same_tt (dev, here.qh->dev)) {
+                                       u32             mask;
+
+                                       mask = hc32_to_cpu(fusbh200,
+                                                       hw->hw_info2);
+                                       /* "knows" no gap is needed */
+                                       mask |= mask >> 8;
+                                       if (mask & uf_mask)
+                                               break;
+                               }
+                               type = Q_NEXT_TYPE(fusbh200, hw->hw_next);
+                               here = here.qh->qh_next;
+                               continue;
+                       // case Q_TYPE_FSTN:
+                       default:
+                               fusbh200_dbg (fusbh200,
+                                       "periodic frame %d bogus type %d\n",
+                                       frame, type);
+                       }
+
+                       /* collision or error */
+                       return 0;
+               }
+       }
+
+       /* no collision */
+       return 1;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_periodic(struct fusbh200_hcd *fusbh200)
+{
+       if (fusbh200->periodic_count++)
+               return;
+
+       /* Stop waiting to turn off the periodic schedule */
+       fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_PERIODIC);
+
+       /* Don't start the schedule until PSS is 0 */
+       fusbh200_poll_PSS(fusbh200);
+       turn_on_io_watchdog(fusbh200);
+}
+
+static void disable_periodic(struct fusbh200_hcd *fusbh200)
+{
+       if (--fusbh200->periodic_count)
+               return;
+
+       /* Don't turn off the schedule until PSS is 1 */
+       fusbh200_poll_PSS(fusbh200);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; fusbh200 0.96+)
+ */
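+/*
+ * The qh is linked into every period-th frame list starting at qh->start.
+ * Within each list, entries stay sorted slowest-period first, so the
+ * faster qhs near the tail can be shared by many frames' lists instead of
+ * being duplicated per frame.
+ */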
+static void qh_link_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       unsigned        i;
+       unsigned        period = qh->period;
+
+       dev_dbg (&qh->dev->dev,
+               "link qh%d-%04x/%p start %d [%d/%d us]\n",
+               period, hc32_to_cpup(fusbh200, &qh->hw->hw_info2)
+                       & (QH_CMASK | QH_SMASK),
+               qh, qh->start, qh->usecs, qh->c_usecs);
+
+       /* high bandwidth, or otherwise every microframe */
+       if (period == 0)
+               period = 1;
+
+       for (i = qh->start; i < fusbh200->periodic_size; i += period) {
+               union fusbh200_shadow   *prev = &fusbh200->pshadow[i];
+               __hc32                  *hw_p = &fusbh200->periodic[i];
+               union fusbh200_shadow   here = *prev;
+               __hc32                  type = 0;
+
+               /* skip the iso nodes at list head */
+               while (here.ptr) {
+                       type = Q_NEXT_TYPE(fusbh200, *hw_p);
+                       if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
+                               break;
+                       prev = periodic_next_shadow(fusbh200, prev, type);
+                       hw_p = shadow_next_periodic(fusbh200, &here, type);
+                       here = *prev;
+               }
+
+               /* sorting each branch by period (slow-->fast)
+                * enables sharing interior tree nodes
+                */
+               while (here.ptr && qh != here.qh) {
+                       if (qh->period > here.qh->period)
+                               break;
+                       prev = &here.qh->qh_next;
+                       hw_p = &here.qh->hw->hw_next;
+                       here = *prev;
+               }
+               /* link in this qh, unless some earlier pass did that */
+               if (qh != here.qh) {
+                       qh->qh_next = here;
+                       if (here.qh)
+                               qh->hw->hw_next = *hw_p;
+                       wmb ();
+                       prev->qh = qh;
+                       *hw_p = QH_NEXT (fusbh200, qh->qh_dma);
+               }
+       }
+       qh->qh_state = QH_STATE_LINKED;
+       qh->xacterrs = 0;
+
+       /* update per-qh bandwidth for usbfs */
+       fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated += qh->period
+               ? ((qh->usecs + qh->c_usecs) / qh->period)
+               : (qh->usecs * 8);
+
+       list_add(&qh->intr_node, &fusbh200->intr_qh_list);
+
+       /* maybe enable periodic schedule processing */
+       ++fusbh200->intr_count;
+       enable_periodic(fusbh200);
+}
+
+static void qh_unlink_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       unsigned        i;
+       unsigned        period;
+
+       /*
+        * If qh is for a low/full-speed device, simply unlinking it
+        * could interfere with an ongoing split transaction.  To unlink
+        * it safely would require setting the QH_INACTIVATE bit and
+        * waiting at least one frame, as described in EHCI 4.12.2.5.
+        *
+        * We won't bother with any of this.  Instead, we assume that the
+        * only reason for unlinking an interrupt QH while the current URB
+        * is still active is to dequeue all the URBs (flush the whole
+        * endpoint queue).
+        *
+        * If rebalancing the periodic schedule is ever implemented, this
+        * approach will no longer be valid.
+        */
+
+       /* high bandwidth, or otherwise part of every microframe */
+       if ((period = qh->period) == 0)
+               period = 1;
+
+       for (i = qh->start; i < fusbh200->periodic_size; i += period)
+               periodic_unlink (fusbh200, i, qh);
+
+       /* update per-qh bandwidth for usbfs */
+       fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated -= qh->period
+               ? ((qh->usecs + qh->c_usecs) / qh->period)
+               : (qh->usecs * 8);
+
+       dev_dbg (&qh->dev->dev,
+               "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+               qh->period,
+               hc32_to_cpup(fusbh200, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
+               qh, qh->start, qh->usecs, qh->c_usecs);
+
+       /* qh->qh_next still "live" to HC */
+       qh->qh_state = QH_STATE_UNLINK;
+       qh->qh_next.ptr = NULL;
+
+       if (fusbh200->qh_scan_next == qh)
+               fusbh200->qh_scan_next = list_entry(qh->intr_node.next,
+                               struct fusbh200_qh, intr_node);
+       list_del(&qh->intr_node);
+}
+
+static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       /* If the QH isn't linked then there's nothing we can do
+        * unless we were called during a giveback, in which case
+        * qh_completions() has to deal with it.
+        */
+       if (qh->qh_state != QH_STATE_LINKED) {
+               if (qh->qh_state == QH_STATE_COMPLETING)
+                       qh->needs_rescan = 1;
+               return;
+       }
+
+       qh_unlink_periodic (fusbh200, qh);
+
+       /* Make sure the unlinks are visible before starting the timer */
+       wmb();
+
+       /*
+        * The EHCI spec doesn't say how long it takes the controller to
+        * stop accessing an unlinked interrupt QH.  The timer delay is
+        * 9 uframes; presumably that will be long enough.
+        */
+       qh->unlink_cycle = fusbh200->intr_unlink_cycle;
+
+       /* New entries go at the end of the intr_unlink list */
+       if (fusbh200->intr_unlink)
+               fusbh200->intr_unlink_last->unlink_next = qh;
+       else
+               fusbh200->intr_unlink = qh;
+       fusbh200->intr_unlink_last = qh;
+
+       if (fusbh200->intr_unlinking)
+               ;       /* Avoid recursive calls */
+       else if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+               fusbh200_handle_intr_unlinks(fusbh200);
+       else if (fusbh200->intr_unlink == qh) {
+               fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
+               ++fusbh200->intr_unlink_cycle;
+       }
+}
+
+static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       struct fusbh200_qh_hw   *hw = qh->hw;
+       int                     rc;
+
+       qh->qh_state = QH_STATE_IDLE;
+       hw->hw_next = FUSBH200_LIST_END(fusbh200);
+
+       qh_completions(fusbh200, qh);
+
+       /* reschedule QH iff another request is queued */
+       if (!list_empty(&qh->qtd_list) && fusbh200->rh_state == FUSBH200_RH_RUNNING) {
+               rc = qh_schedule(fusbh200, qh);
+
+               /* An error here likely indicates handshake failure
+                * or no space left in the schedule.  Neither fault
+                * should happen often ...
+                *
+                * FIXME kill the now-dysfunctional queued urbs
+                */
+               if (rc != 0)
+                       fusbh200_err(fusbh200, "can't reschedule qh %p, err %d\n",
+                                       qh, rc);
+       }
+
+       /* maybe turn off periodic schedule */
+       --fusbh200->intr_count;
+       disable_periodic(fusbh200);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int check_period (
+       struct fusbh200_hcd *fusbh200,
+       unsigned        frame,
+       unsigned        uframe,
+       unsigned        period,
+       unsigned        usecs
+) {
+       int             claimed;
+
+       /* complete split running into next frame?
+        * given FSTN support, we could sometimes check...
+        */
+       if (uframe >= 8)
+               return 0;
+
+       /* convert "usecs we need" to "max already claimed" */
+       usecs = fusbh200->uframe_periodic_max - usecs;
+
+       /* we "know" 2 and 4 uframe intervals were rejected; so
+        * for period 0, check _every_ microframe in the schedule.
+        */
+       if (unlikely (period == 0)) {
+               do {
+                       for (uframe = 0; uframe < 7; uframe++) {
+                               claimed = periodic_usecs (fusbh200, frame, uframe);
+                               if (claimed > usecs)
+                                       return 0;
+                       }
+               } while ((frame += 1) < fusbh200->periodic_size);
+
+       /* just check the specified uframe, at that period */
+       } else {
+               do {
+                       claimed = periodic_usecs (fusbh200, frame, uframe);
+                       if (claimed > usecs)
+                               return 0;
+               } while ((frame += period) < fusbh200->periodic_size);
+       }
+
+       // success!
+       return 1;
+}
+
+static int check_intr_schedule (
+       struct fusbh200_hcd             *fusbh200,
+       unsigned                frame,
+       unsigned                uframe,
+       const struct fusbh200_qh        *qh,
+       __hc32                  *c_maskp
+)
+{
+       int             retval = -ENOSPC;
+       u8              mask = 0;
+
+       if (qh->c_usecs && uframe >= 6)         /* FSTN territory? */
+               goto done;
+
+       if (!check_period (fusbh200, frame, uframe, qh->period, qh->usecs))
+               goto done;
+       if (!qh->c_usecs) {
+               retval = 0;
+               *c_maskp = 0;
+               goto done;
+       }
+
+       /* Make sure this tt's buffer is also available for CSPLITs.
+        * We pessimize a bit; probably the typical full speed case
+        * doesn't need the second CSPLIT.
+        *
+        * NOTE:  both SPLIT and CSPLIT could be checked in just
+        * one smart pass...
+        */
+       mask = 0x03 << (uframe + qh->gap_uf);
+       *c_maskp = cpu_to_hc32(fusbh200, mask << 8);
+
+       mask |= 1 << uframe;
+       if (tt_no_collision (fusbh200, qh->period, qh->dev, frame, mask)) {
+               if (!check_period (fusbh200, frame, uframe + qh->gap_uf + 1,
+                                       qh->period, qh->c_usecs))
+                       goto done;
+               if (!check_period (fusbh200, frame, uframe + qh->gap_uf,
+                                       qh->period, qh->c_usecs))
+                       goto done;
+               retval = 0;
+       }
+done:
+       return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
+{
+       int             status;
+       unsigned        uframe;
+       __hc32          c_mask;
+       unsigned        frame;          /* 0..(qh->period - 1), or NO_FRAME */
+       struct fusbh200_qh_hw   *hw = qh->hw;
+
+       qh_refresh(fusbh200, qh);
+       hw->hw_next = FUSBH200_LIST_END(fusbh200);
+       frame = qh->start;
+
+       /* reuse the previous schedule slots, if we can */
+       if (frame < qh->period) {
+               uframe = ffs(hc32_to_cpup(fusbh200, &hw->hw_info2) & QH_SMASK);
+               status = check_intr_schedule (fusbh200, frame, --uframe,
+                               qh, &c_mask);
+       } else {
+               uframe = 0;
+               c_mask = 0;
+               status = -ENOSPC;
+       }
+
+       /* else scan the schedule to find a group of slots such that all
+        * uframes have enough periodic bandwidth available.
+        */
+       if (status) {
+               /* "normal" case, uframing flexible except with splits */
+               if (qh->period) {
+                       int             i;
+
+                       for (i = qh->period; status && i > 0; --i) {
+                               frame = ++fusbh200->random_frame % qh->period;
+                               for (uframe = 0; uframe < 8; uframe++) {
+                                       status = check_intr_schedule (fusbh200,
+                                                       frame, uframe, qh,
+                                                       &c_mask);
+                                       if (status == 0)
+                                               break;
+                               }
+                       }
+
+               /* qh->period == 0 means every uframe */
+               } else {
+                       frame = 0;
+                       status = check_intr_schedule (fusbh200, 0, 0, qh, &c_mask);
+               }
+               if (status)
+                       goto done;
+               qh->start = frame;
+
+               /* reset S-frame and (maybe) C-frame masks */
+               hw->hw_info2 &= cpu_to_hc32(fusbh200, ~(QH_CMASK | QH_SMASK));
+               hw->hw_info2 |= qh->period
+                       ? cpu_to_hc32(fusbh200, 1 << uframe)
+                       : cpu_to_hc32(fusbh200, QH_SMASK);
+               hw->hw_info2 |= c_mask;
+       } else
+               fusbh200_dbg (fusbh200, "reused qh %p schedule\n", qh);
+
+       /* stuff into the periodic schedule */
+       qh_link_periodic(fusbh200, qh);
+done:
+       return status;
+}
+
+static int intr_submit (
+       struct fusbh200_hcd             *fusbh200,
+       struct urb              *urb,
+       struct list_head        *qtd_list,
+       gfp_t                   mem_flags
+) {
+       unsigned                epnum;
+       unsigned long           flags;
+       struct fusbh200_qh              *qh;
+       int                     status;
+       struct list_head        empty;
+
+       /* get endpoint and transfer/schedule data */
+       epnum = urb->ep->desc.bEndpointAddress;
+
+       spin_lock_irqsave (&fusbh200->lock, flags);
+
+       if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
+               status = -ESHUTDOWN;
+               goto done_not_linked;
+       }
+       status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
+       if (unlikely(status))
+               goto done_not_linked;
+
+       /* get qh and force any scheduling errors */
+       INIT_LIST_HEAD (&empty);
+       qh = qh_append_tds(fusbh200, urb, &empty, epnum, &urb->ep->hcpriv);
+       if (qh == NULL) {
+               status = -ENOMEM;
+               goto done;
+       }
+       if (qh->qh_state == QH_STATE_IDLE) {
+               if ((status = qh_schedule (fusbh200, qh)) != 0)
+                       goto done;
+       }
+
+       /* then queue the urb's tds to the qh */
+       qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
+       BUG_ON (qh == NULL);
+
+       /* ... update usbfs periodic stats */
+       fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs++;
+
+done:
+       if (unlikely(status))
+               usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+done_not_linked:
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+       if (status)
+               qtd_list_free (fusbh200, urb, qtd_list);
+
+       return status;
+}
+
+static void scan_intr(struct fusbh200_hcd *fusbh200)
+{
+       struct fusbh200_qh              *qh;
+
+       list_for_each_entry_safe(qh, fusbh200->qh_scan_next, &fusbh200->intr_qh_list,
+                       intr_node) {
+ rescan:
+               /* clean any finished work for this qh */
+               if (!list_empty(&qh->qtd_list)) {
+                       int temp;
+
+                       /*
+                        * Unlinks could happen here; completion reporting
+                        * drops the lock.  That's why fusbh200->qh_scan_next
+                        * always holds the next qh to scan; if the next qh
+                        * gets unlinked then fusbh200->qh_scan_next is adjusted
+                        * in qh_unlink_periodic().
+                        */
+                       temp = qh_completions(fusbh200, qh);
+                       if (unlikely(qh->needs_rescan ||
+                                       (list_empty(&qh->qtd_list) &&
+                                               qh->qh_state == QH_STATE_LINKED)))
+                               start_unlink_intr(fusbh200, qh);
+                       else if (temp != 0)
+                               goto rescan;
+               }
+       }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fusbh200_iso_stream ops work with both ITD and SITD */
+
+static struct fusbh200_iso_stream *
+iso_stream_alloc (gfp_t mem_flags)
+{
+       struct fusbh200_iso_stream *stream;
+
+       stream = kzalloc(sizeof *stream, mem_flags);
+       if (likely (stream != NULL)) {
+               INIT_LIST_HEAD(&stream->td_list);
+               INIT_LIST_HEAD(&stream->free_list);
+               stream->next_uframe = -1;
+       }
+       return stream;
+}
+
+static void
+iso_stream_init (
+       struct fusbh200_hcd             *fusbh200,
+       struct fusbh200_iso_stream      *stream,
+       struct usb_device       *dev,
+       int                     pipe,
+       unsigned                interval
+)
+{
+       u32                     buf1;
+       unsigned                epnum, maxp;
+       int                     is_input;
+       long                    bandwidth;
+       unsigned                multi;
+
+       /*
+        * this might be a "high bandwidth" highspeed endpoint,
+        * as encoded in the ep descriptor's wMaxPacketSize field
+        */
+       epnum = usb_pipeendpoint (pipe);
+       is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
+       maxp = usb_maxpacket(dev, pipe, !is_input);
+       if (is_input) {
+               buf1 = (1 << 11);
+       } else {
+               buf1 = 0;
+       }
+
+       maxp = max_packet(maxp);
+       multi = hb_mult(maxp);
+       buf1 |= maxp;
+       maxp *= multi;
+
+       stream->buf0 = cpu_to_hc32(fusbh200, (epnum << 8) | dev->devnum);
+       stream->buf1 = cpu_to_hc32(fusbh200, buf1);
+       stream->buf2 = cpu_to_hc32(fusbh200, multi);
+
+       /* usbfs wants to report the average usecs per frame tied up
+        * when transfers on this endpoint are scheduled ...
+        */
+       if (dev->speed == USB_SPEED_FULL) {
+               interval <<= 3;
+               stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
+                               is_input, 1, maxp));
+               stream->usecs /= 8;
+       } else {
+               stream->highspeed = 1;
+               stream->usecs = HS_USECS_ISO (maxp);
+       }
+       bandwidth = stream->usecs * 8;
+       bandwidth /= interval;
+
+       stream->bandwidth = bandwidth;
+       stream->udev = dev;
+       stream->bEndpointAddress = is_input | epnum;
+       stream->interval = interval;
+       stream->maxp = maxp;
+}
+
+static struct fusbh200_iso_stream *
+iso_stream_find (struct fusbh200_hcd *fusbh200, struct urb *urb)
+{
+       unsigned                epnum;
+       struct fusbh200_iso_stream      *stream;
+       struct usb_host_endpoint *ep;
+       unsigned long           flags;
+
+       epnum = usb_pipeendpoint (urb->pipe);
+       if (usb_pipein(urb->pipe))
+               ep = urb->dev->ep_in[epnum];
+       else
+               ep = urb->dev->ep_out[epnum];
+
+       spin_lock_irqsave (&fusbh200->lock, flags);
+       stream = ep->hcpriv;
+
+       if (unlikely (stream == NULL)) {
+               stream = iso_stream_alloc(GFP_ATOMIC);
+               if (likely (stream != NULL)) {
+                       ep->hcpriv = stream;
+                       stream->ep = ep;
+                       iso_stream_init(fusbh200, stream, urb->dev, urb->pipe,
+                                       urb->interval);
+               }
+
+       /* if dev->ep [epnum] is a QH, hw is set */
+       } else if (unlikely (stream->hw != NULL)) {
+               fusbh200_dbg (fusbh200, "dev %s ep%d%s, not iso??\n",
+                       urb->dev->devpath, epnum,
+                       usb_pipein(urb->pipe) ? "in" : "out");
+               stream = NULL;
+       }
+
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+       return stream;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fusbh200_iso_sched ops can be ITD-only or SITD-only */
+
+static struct fusbh200_iso_sched *
+iso_sched_alloc (unsigned packets, gfp_t mem_flags)
+{
+       struct fusbh200_iso_sched       *iso_sched;
+       int                     size = sizeof *iso_sched;
+
+       size += packets * sizeof (struct fusbh200_iso_packet);
+       iso_sched = kzalloc(size, mem_flags);
+       if (likely (iso_sched != NULL)) {
+               INIT_LIST_HEAD (&iso_sched->td_list);
+       }
+       return iso_sched;
+}
+
+static inline void
+itd_sched_init(
+       struct fusbh200_hcd             *fusbh200,
+       struct fusbh200_iso_sched       *iso_sched,
+       struct fusbh200_iso_stream      *stream,
+       struct urb              *urb
+)
+{
+       unsigned        i;
+       dma_addr_t      dma = urb->transfer_dma;
+
+       /* how many uframes are needed for these transfers */
+       iso_sched->span = urb->number_of_packets * stream->interval;
+
+       /* figure out per-uframe itd fields that we'll need later
+        * when we fit new itds into the schedule.
+        */
+       for (i = 0; i < urb->number_of_packets; i++) {
+               struct fusbh200_iso_packet      *uframe = &iso_sched->packet [i];
+               unsigned                length;
+               dma_addr_t              buf;
+               u32                     trans;
+
+               length = urb->iso_frame_desc [i].length;
+               buf = dma + urb->iso_frame_desc [i].offset;
+
+               trans = FUSBH200_ISOC_ACTIVE;
+               trans |= buf & 0x0fff;
+               if (unlikely (((i + 1) == urb->number_of_packets))
+                               && !(urb->transfer_flags & URB_NO_INTERRUPT))
+                       trans |= FUSBH200_ITD_IOC;
+               trans |= length << 16;
+               uframe->transaction = cpu_to_hc32(fusbh200, trans);
+
+               /* might need to cross a buffer page within a uframe */
+               uframe->bufp = (buf & ~(u64)0x0fff);
+               buf += length;
+               if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
+                       uframe->cross = 1;
+       }
+}
+
+static void
+iso_sched_free (
+       struct fusbh200_iso_stream      *stream,
+       struct fusbh200_iso_sched       *iso_sched
+)
+{
+       if (!iso_sched)
+               return;
+       // caller must hold fusbh200->lock!
+       list_splice (&iso_sched->td_list, &stream->free_list);
+       kfree (iso_sched);
+}
+
+static int
+itd_urb_transaction (
+       struct fusbh200_iso_stream      *stream,
+       struct fusbh200_hcd             *fusbh200,
+       struct urb              *urb,
+       gfp_t                   mem_flags
+)
+{
+       struct fusbh200_itd             *itd;
+       dma_addr_t              itd_dma;
+       int                     i;
+       unsigned                num_itds;
+       struct fusbh200_iso_sched       *sched;
+       unsigned long           flags;
+
+       sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
+       if (unlikely (sched == NULL))
+               return -ENOMEM;
+
+       itd_sched_init(fusbh200, sched, stream, urb);
+
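+       /*
+        * With interval < 8 uframes, one iTD covers a whole frame (up to 8
+        * transactions), so the URB needs span/8 iTDs rounded up, plus one
+        * spare in case the transfer doesn't start on a frame boundary.
+        * Otherwise each packet falls in a different frame and gets its
+        * own iTD.
+        */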
+       if (urb->interval < 8)
+               num_itds = 1 + (sched->span + 7) / 8;
+       else
+               num_itds = urb->number_of_packets;
+
+       /* allocate/init ITDs */
+       spin_lock_irqsave (&fusbh200->lock, flags);
+       for (i = 0; i < num_itds; i++) {
+
+               /*
+                * Use iTDs from the free list, but not iTDs that may
+                * still be in use by the hardware.
+                */
+               if (likely(!list_empty(&stream->free_list))) {
+                       itd = list_first_entry(&stream->free_list,
+                                       struct fusbh200_itd, itd_list);
+                       if (itd->frame == fusbh200->now_frame)
+                               goto alloc_itd;
+                       list_del (&itd->itd_list);
+                       itd_dma = itd->itd_dma;
+               } else {
+ alloc_itd:
+                       spin_unlock_irqrestore (&fusbh200->lock, flags);
+                       itd = dma_pool_alloc (fusbh200->itd_pool, mem_flags,
+                                       &itd_dma);
+                       spin_lock_irqsave (&fusbh200->lock, flags);
+                       if (!itd) {
+                               iso_sched_free(stream, sched);
+                               spin_unlock_irqrestore(&fusbh200->lock, flags);
+                               return -ENOMEM;
+                       }
+               }
+
+               memset (itd, 0, sizeof *itd);
+               itd->itd_dma = itd_dma;
+               list_add (&itd->itd_list, &sched->td_list);
+       }
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+
+       /* temporarily store schedule info in hcpriv */
+       urb->hcpriv = sched;
+       urb->error_count = 0;
+       return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline int
+itd_slot_ok (
+       struct fusbh200_hcd             *fusbh200,
+       u32                     mod,
+       u32                     uframe,
+       u8                      usecs,
+       u32                     period
+)
+{
+       uframe %= period;
+       do {
+               /* can't commit more than uframe_periodic_max usec */
+               if (periodic_usecs (fusbh200, uframe >> 3, uframe & 0x7)
+                               > (fusbh200->uframe_periodic_max - usecs))
+                       return 0;
+
+               /* we know urb->interval is 2^N uframes */
+               uframe += period;
+       } while (uframe < mod);
+       return 1;
+}
+
+/*
+ * This scheduler plans almost as far into the future as it has actual
+ * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
+ * "as small as possible" to be cache-friendlier.)  That limits the size
+ * transfers you can stream reliably; avoid more than 64 msec per urb.
+ * Also avoid queue depths of less than fusbh200's worst irq latency (affected
+ * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
+ * and other factors); or more than about 230 msec total (for portability,
+ * given FUSBH200_TUNE_FLS and the slop).  Or, write a smarter scheduler!
+ */
+
+#define SCHEDULE_SLOP  80      /* microframes */
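+/* 80 uframes == 10 msec: guard time used when picking the first slot for
+ * a new stream and when checking how far a request may overflow the
+ * schedule.
+ */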
+
+static int
+iso_stream_schedule (
+       struct fusbh200_hcd             *fusbh200,
+       struct urb              *urb,
+       struct fusbh200_iso_stream      *stream
+)
+{
+       u32                     now, next, start, period, span;
+       int                     status;
+       unsigned                mod = fusbh200->periodic_size << 3;
+       struct fusbh200_iso_sched       *sched = urb->hcpriv;
+
+       period = urb->interval;
+       span = sched->span;
+
+       if (span > mod - SCHEDULE_SLOP) {
+               fusbh200_dbg (fusbh200, "iso request %p too long\n", urb);
+               status = -EFBIG;
+               goto fail;
+       }
+
+       now = fusbh200_read_frame_index(fusbh200) & (mod - 1);
+
+       /* Typical case: reuse current schedule, stream is still active.
+        * Hopefully there are no gaps from the host falling behind
+        * (irq delays etc), but if there are we'll take the next
+        * slot in the schedule, implicitly assuming URB_ISO_ASAP.
+        */
+       if (likely (!list_empty (&stream->td_list))) {
+               u32     excess;
+
+               /* For high speed devices, allow scheduling within the
+                * isochronous scheduling threshold.  For full speed devices
+                * and Intel PCI-based controllers, don't (workaround for
+                * Intel ICH9 bug).
+                */
+               if (!stream->highspeed && fusbh200->fs_i_thresh)
+                       next = now + fusbh200->i_thresh;
+               else
+                       next = now;
+
+               /* Fell behind (by up to twice the slop amount)?
+                * We decide based on the time of the last currently-scheduled
+                * slot, not the time of the next available slot.
+                */
+               excess = (stream->next_uframe - period - next) & (mod - 1);
+               if (excess >= mod - 2 * SCHEDULE_SLOP)
+                       start = next + excess - mod + period *
+                                       DIV_ROUND_UP(mod - excess, period);
+               else
+                       start = next + excess + period;
+               if (start - now >= mod) {
+                       fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
+                                       urb, start - now - period, period,
+                                       mod);
+                       status = -EFBIG;
+                       goto fail;
+               }
+       }
+
+       /* need to schedule; when's the next (u)frame we could start?
+        * this is bigger than fusbh200->i_thresh allows; scheduling itself
+        * isn't free, the slop should handle reasonably slow cpus.  it
+        * can also help high bandwidth if the dma and irq loads don't
+        * jump until after the queue is primed.
+        */
+       else {
+               int done = 0;
+               start = SCHEDULE_SLOP + (now & ~0x07);
+
+               /* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */
+
+               /* find a uframe slot with enough bandwidth.
+                * Early uframes are more precious because full-speed
+                * iso IN transfers can't use late uframes,
+                * and therefore they should be allocated last.
+                */
+               next = start;
+               start += period;
+               do {
+                       start--;
+                       /* check schedule: enough space? */
+                       if (itd_slot_ok(fusbh200, mod, start,
+                                       stream->usecs, period))
+                               done = 1;
+               } while (start > next && !done);
+
+               /* no room in the schedule */
+               if (!done) {
+                       fusbh200_dbg(fusbh200, "iso resched full %p (now %d max %d)\n",
+                               urb, now, now + mod);
+                       status = -ENOSPC;
+                       goto fail;
+               }
+       }
+
+       /* Tried to schedule too far into the future? */
+       if (unlikely(start - now + span - period
+                               >= mod - 2 * SCHEDULE_SLOP)) {
+               fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
+                               urb, start - now, span - period,
+                               mod - 2 * SCHEDULE_SLOP);
+               status = -EFBIG;
+               goto fail;
+       }
+
+       stream->next_uframe = start & (mod - 1);
+
+       /* report high speed start in uframes; full speed, in frames */
+       urb->start_frame = stream->next_uframe;
+       if (!stream->highspeed)
+               urb->start_frame >>= 3;
+
+       /* Make sure scan_isoc() sees these */
+       if (fusbh200->isoc_count == 0)
+               fusbh200->next_frame = now >> 3;
+       return 0;
+
+ fail:
+       iso_sched_free(stream, sched);
+       urb->hcpriv = NULL;
+       return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+itd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_iso_stream *stream,
+               struct fusbh200_itd *itd)
+{
+       int i;
+
+       /* it's been recently zeroed */
+       itd->hw_next = FUSBH200_LIST_END(fusbh200);
+       itd->hw_bufp [0] = stream->buf0;
+       itd->hw_bufp [1] = stream->buf1;
+       itd->hw_bufp [2] = stream->buf2;
+
+       for (i = 0; i < 8; i++)
+               itd->index[i] = -1;
+
+       /* All other fields are filled when scheduling */
+}
+
+static inline void
+itd_patch(
+       struct fusbh200_hcd             *fusbh200,
+       struct fusbh200_itd             *itd,
+       struct fusbh200_iso_sched       *iso_sched,
+       unsigned                index,
+       u16                     uframe
+)
+{
+       struct fusbh200_iso_packet      *uf = &iso_sched->packet [index];
+       unsigned                pg = itd->pg;
+
+       // BUG_ON (pg == 6 && uf->cross);
+
+       uframe &= 0x07;
+       itd->index [uframe] = index;
+
+       itd->hw_transaction[uframe] = uf->transaction;
+       itd->hw_transaction[uframe] |= cpu_to_hc32(fusbh200, pg << 12);
+       itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, uf->bufp & ~(u32)0);
+       itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(uf->bufp >> 32));
+
+       /* iso_frame_desc[].offset must be strictly increasing */
+       if (unlikely (uf->cross)) {
+               u64     bufp = uf->bufp + 4096;
+
+               itd->pg = ++pg;
+               itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, bufp & ~(u32)0);
+               itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(bufp >> 32));
+       }
+}
+
+static inline void
+itd_link (struct fusbh200_hcd *fusbh200, unsigned frame, struct fusbh200_itd *itd)
+{
+       union fusbh200_shadow   *prev = &fusbh200->pshadow[frame];
+       __hc32                  *hw_p = &fusbh200->periodic[frame];
+       union fusbh200_shadow   here = *prev;
+       __hc32                  type = 0;
+
+       /* skip any iso nodes which might belong to previous microframes */
+       while (here.ptr) {
+               type = Q_NEXT_TYPE(fusbh200, *hw_p);
+               if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
+                       break;
+               prev = periodic_next_shadow(fusbh200, prev, type);
+               hw_p = shadow_next_periodic(fusbh200, &here, type);
+               here = *prev;
+       }
+
+       itd->itd_next = here;
+       itd->hw_next = *hw_p;
+       prev->itd = itd;
+       itd->frame = frame;
+       wmb ();
+       *hw_p = cpu_to_hc32(fusbh200, itd->itd_dma | Q_TYPE_ITD);
+}
+
+/* fit urb's itds into the selected schedule slot; activate as needed */
+static void itd_link_urb(
+       struct fusbh200_hcd             *fusbh200,
+       struct urb              *urb,
+       unsigned                mod,
+       struct fusbh200_iso_stream      *stream
+)
+{
+       int                     packet;
+       unsigned                next_uframe, uframe, frame;
+       struct fusbh200_iso_sched       *iso_sched = urb->hcpriv;
+       struct fusbh200_itd             *itd;
+
+       next_uframe = stream->next_uframe & (mod - 1);
+
+       if (unlikely (list_empty(&stream->td_list))) {
+               fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
+                               += stream->bandwidth;
+               fusbh200_vdbg (fusbh200,
+                       "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
+                       urb->dev->devpath, stream->bEndpointAddress & 0x0f,
+                       (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+                       urb->interval,
+                       next_uframe >> 3, next_uframe & 0x7);
+       }
+
+       /* fill iTDs uframe by uframe */
+       for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
+               if (itd == NULL) {
+                       /* ASSERT:  we have all necessary itds */
+                       // BUG_ON (list_empty (&iso_sched->td_list));
+
+                       /* ASSERT:  no itds for this endpoint in this uframe */
+
+                       itd = list_entry (iso_sched->td_list.next,
+                                       struct fusbh200_itd, itd_list);
+                       list_move_tail (&itd->itd_list, &stream->td_list);
+                       itd->stream = stream;
+                       itd->urb = urb;
+                       itd_init (fusbh200, stream, itd);
+               }
+
+               uframe = next_uframe & 0x07;
+               frame = next_uframe >> 3;
+
+               itd_patch(fusbh200, itd, iso_sched, packet, uframe);
+
+               next_uframe += stream->interval;
+               next_uframe &= mod - 1;
+               packet++;
+
+               /* link completed itds into the schedule */
+               if (((next_uframe >> 3) != frame)
+                               || packet == urb->number_of_packets) {
+                       itd_link(fusbh200, frame & (fusbh200->periodic_size - 1), itd);
+                       itd = NULL;
+               }
+       }
+       stream->next_uframe = next_uframe;
+
+       /* don't need that schedule data any more */
+       iso_sched_free (stream, iso_sched);
+       urb->hcpriv = NULL;
+
+       ++fusbh200->isoc_count;
+       enable_periodic(fusbh200);
+}
+
+#define        ISO_ERRS (FUSBH200_ISOC_BUF_ERR | FUSBH200_ISOC_BABBLE | FUSBH200_ISOC_XACTERR)
+
+/* Process and recycle a completed ITD.  Return true iff its urb completed,
+ * and hence its completion callback probably added things to the hardware
+ * schedule.
+ *
+ * Note that we carefully avoid recycling this descriptor until after any
+ * completion callback runs, so that it won't be reused quickly.  That is,
+ * assuming (a) no more than two urbs per frame on this endpoint, and also
+ * (b) only this endpoint's completions submit URBs.  It seems some silicon
+ * corrupts things if you reuse completed descriptors very quickly...
+ */
+static bool itd_complete(struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
+{
+       struct urb                              *urb = itd->urb;
+       struct usb_iso_packet_descriptor        *desc;
+       u32                                     t;
+       unsigned                                uframe;
+       int                                     urb_index = -1;
+       struct fusbh200_iso_stream                      *stream = itd->stream;
+       struct usb_device                       *dev;
+       bool                                    retval = false;
+
+       /* for each uframe with a packet */
+       for (uframe = 0; uframe < 8; uframe++) {
+               if (likely (itd->index[uframe] == -1))
+                       continue;
+               urb_index = itd->index[uframe];
+               desc = &urb->iso_frame_desc [urb_index];
+
+               t = hc32_to_cpup(fusbh200, &itd->hw_transaction [uframe]);
+               itd->hw_transaction [uframe] = 0;
+
+               /* report transfer status */
+               if (unlikely (t & ISO_ERRS)) {
+                       urb->error_count++;
+                       if (t & FUSBH200_ISOC_BUF_ERR)
+                               desc->status = usb_pipein (urb->pipe)
+                                       ? -ENOSR  /* hc couldn't read */
+                                       : -ECOMM; /* hc couldn't write */
+                       else if (t & FUSBH200_ISOC_BABBLE)
+                               desc->status = -EOVERFLOW;
+                       else /* (t & FUSBH200_ISOC_XACTERR) */
+                               desc->status = -EPROTO;
+
+                       /* HC need not update length with this error */
+                       if (!(t & FUSBH200_ISOC_BABBLE)) {
+                               desc->actual_length = fusbh200_itdlen(urb, desc, t);
+                               urb->actual_length += desc->actual_length;
+                       }
+               } else if (likely ((t & FUSBH200_ISOC_ACTIVE) == 0)) {
+                       desc->status = 0;
+                       desc->actual_length = fusbh200_itdlen(urb, desc, t);
+                       urb->actual_length += desc->actual_length;
+               } else {
+                       /* URB was too late */
+                       desc->status = -EXDEV;
+               }
+       }
+
+       /* handle completion now? */
+       if (likely ((urb_index + 1) != urb->number_of_packets))
+               goto done;
+
+       /* ASSERT: it's really the last itd for this urb
+       list_for_each_entry (itd, &stream->td_list, itd_list)
+               BUG_ON (itd->urb == urb);
+        */
+
+       /* give urb back to the driver; completion often (re)submits */
+       dev = urb->dev;
+       fusbh200_urb_done(fusbh200, urb, 0);
+       retval = true;
+       urb = NULL;
+
+       --fusbh200->isoc_count;
+       disable_periodic(fusbh200);
+
+       if (unlikely(list_is_singular(&stream->td_list))) {
+               fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
+                               -= stream->bandwidth;
+               fusbh200_vdbg (fusbh200,
+                       "deschedule devp %s ep%d%s-iso\n",
+                       dev->devpath, stream->bEndpointAddress & 0x0f,
+                       (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
+       }
+
+done:
+       itd->urb = NULL;
+
+       /* Add to the end of the free list for later reuse */
+       list_move_tail(&itd->itd_list, &stream->free_list);
+
+       /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+       if (list_empty(&stream->td_list)) {
+               list_splice_tail_init(&stream->free_list,
+                               &fusbh200->cached_itd_list);
+               start_free_itds(fusbh200);
+       }
+
+       return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int itd_submit (struct fusbh200_hcd *fusbh200, struct urb *urb,
+       gfp_t mem_flags)
+{
+       int                     status = -EINVAL;
+       unsigned long           flags;
+       struct fusbh200_iso_stream      *stream;
+
+       /* Get iso_stream head */
+       stream = iso_stream_find (fusbh200, urb);
+       if (unlikely (stream == NULL)) {
+               fusbh200_dbg (fusbh200, "can't get iso stream\n");
+               return -ENOMEM;
+       }
+       if (unlikely (urb->interval != stream->interval &&
+                     fusbh200_port_speed(fusbh200, 0) == USB_PORT_STAT_HIGH_SPEED)) {
+                       fusbh200_dbg (fusbh200, "can't change iso interval %d --> %d\n",
+                               stream->interval, urb->interval);
+                       goto done;
+       }
+
+#ifdef FUSBH200_URB_TRACE
+       fusbh200_dbg (fusbh200,
+               "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
+               __func__, urb->dev->devpath, urb,
+               usb_pipeendpoint (urb->pipe),
+               usb_pipein (urb->pipe) ? "in" : "out",
+               urb->transfer_buffer_length,
+               urb->number_of_packets, urb->interval,
+               stream);
+#endif
+
+       /* allocate ITDs w/o locking anything */
+       status = itd_urb_transaction (stream, fusbh200, urb, mem_flags);
+       if (unlikely (status < 0)) {
+               fusbh200_dbg (fusbh200, "can't init itds\n");
+               goto done;
+       }
+
+       /* schedule ... need to lock */
+       spin_lock_irqsave (&fusbh200->lock, flags);
+       if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
+               status = -ESHUTDOWN;
+               goto done_not_linked;
+       }
+       status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
+       if (unlikely(status))
+               goto done_not_linked;
+       status = iso_stream_schedule(fusbh200, urb, stream);
+       if (likely (status == 0))
+               itd_link_urb (fusbh200, urb, fusbh200->periodic_size << 3, stream);
+       else
+               usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
+ done_not_linked:
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+ done:
+       return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_isoc(struct fusbh200_hcd *fusbh200)
+{
+       unsigned        uf, now_frame, frame;
+       unsigned        fmask = fusbh200->periodic_size - 1;
+       bool            modified, live;
+
+       /*
+        * When running, scan from last scan point up to "now";
+        * else clean up by scanning everything that's left.
+        * Touches as few pages as possible:  cache-friendly.
+        */
+       if (fusbh200->rh_state >= FUSBH200_RH_RUNNING) {
+               uf = fusbh200_read_frame_index(fusbh200);
+               now_frame = (uf >> 3) & fmask;
+               live = true;
+       } else  {
+               now_frame = (fusbh200->next_frame - 1) & fmask;
+               live = false;
+       }
+       fusbh200->now_frame = now_frame;
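+       /*
+        * Note on the arithmetic above: FRINDEX counts microframes, so
+        * "uf >> 3" converts it to a frame number, and fmask (periodic_size
+        * is always a power of two here) wraps that into the periodic
+        * schedule, e.g. 0x3ff for the default 1024-entry list.
+        */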
+
+       frame = fusbh200->next_frame;
+       for (;;) {
+               union fusbh200_shadow   q, *q_p;
+               __hc32                  type, *hw_p;
+
+restart:
+               /* scan each element in frame's queue for completions */
+               q_p = &fusbh200->pshadow [frame];
+               hw_p = &fusbh200->periodic [frame];
+               q.ptr = q_p->ptr;
+               type = Q_NEXT_TYPE(fusbh200, *hw_p);
+               modified = false;
+
+               while (q.ptr != NULL) {
+                       switch (hc32_to_cpu(fusbh200, type)) {
+                       case Q_TYPE_ITD:
+                               /* If this ITD is still active, leave it for
+                                * later processing ... check the next entry.
+                                * No need to check for activity unless the
+                                * frame is current.
+                                */
+                               if (frame == now_frame && live) {
+                                       rmb();
+                                       for (uf = 0; uf < 8; uf++) {
+                                               if (q.itd->hw_transaction[uf] &
+                                                           ITD_ACTIVE(fusbh200))
+                                                       break;
+                                       }
+                                       if (uf < 8) {
+                                               q_p = &q.itd->itd_next;
+                                               hw_p = &q.itd->hw_next;
+                                               type = Q_NEXT_TYPE(fusbh200,
+                                                       q.itd->hw_next);
+                                               q = *q_p;
+                                               break;
+                                       }
+                               }
+
+                               /* Take finished ITDs out of the schedule
+                                * and process them:  recycle, maybe report
+                                * URB completion.  HC won't cache the
+                                * pointer for much longer, if at all.
+                                */
+                               *q_p = q.itd->itd_next;
+                               *hw_p = q.itd->hw_next;
+                               type = Q_NEXT_TYPE(fusbh200, q.itd->hw_next);
+                               wmb();
+                               modified = itd_complete (fusbh200, q.itd);
+                               q = *q_p;
+                               break;
+                       default:
+                               fusbh200_dbg(fusbh200, "corrupt type %d frame %d shadow %p\n",
+                                       type, frame, q.ptr);
+                               // BUG ();
+                               /* FALL THROUGH */
+                       case Q_TYPE_QH:
+                       case Q_TYPE_FSTN:
+                               /* End of the iTDs and siTDs */
+                               q.ptr = NULL;
+                               break;
+                       }
+
+                       /* assume completion callbacks modify the queue */
+                       if (unlikely(modified && fusbh200->isoc_count > 0))
+                               goto restart;
+               }
+
+               /* Stop when we have reached the current frame */
+               if (frame == now_frame)
+                       break;
+               frame = (frame + 1) & fmask;
+       }
+       fusbh200->next_frame = now_frame;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * Display / Set uframe_periodic_max
+ */
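+/*
+ * Units note: uframe_periodic_max is expressed in usec per 125-usec
+ * microframe.  The USB 2.0 default of 100 usec therefore corresponds to
+ * 100 * 100 / 125 == 80% periodic bandwidth, and the accepted range of
+ * 100..124 below maps to 80%..99%.
+ */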
+static ssize_t show_uframe_periodic_max(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct fusbh200_hcd             *fusbh200;
+       int                     n;
+
+       fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
+       n = scnprintf(buf, PAGE_SIZE, "%d\n", fusbh200->uframe_periodic_max);
+       return n;
+}
+
+
+static ssize_t store_uframe_periodic_max(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct fusbh200_hcd             *fusbh200;
+       unsigned                uframe_periodic_max;
+       unsigned                frame, uframe;
+       unsigned short          allocated_max;
+       unsigned long           flags;
+       ssize_t                 ret;
+
+       fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
+       if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+               return -EINVAL;
+
+       if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+               fusbh200_info(fusbh200, "rejecting invalid request for "
+                               "uframe_periodic_max=%u\n", uframe_periodic_max);
+               return -EINVAL;
+       }
+
+       ret = -EINVAL;
+
+       /*
+        * lock, so that our checking does not race with possible periodic
+        * bandwidth allocation through submitting new urbs.
+        */
+       spin_lock_irqsave (&fusbh200->lock, flags);
+
+       /*
+        * for request to decrease max periodic bandwidth, we have to check
+        * every microframe in the schedule to see whether the decrease is
+        * possible.
+        */
+       if (uframe_periodic_max < fusbh200->uframe_periodic_max) {
+               allocated_max = 0;
+
+               for (frame = 0; frame < fusbh200->periodic_size; ++frame)
+                       for (uframe = 0; uframe < 7; ++uframe)
+                               allocated_max = max(allocated_max,
+                                                   periodic_usecs (fusbh200, frame, uframe));
+
+               if (allocated_max > uframe_periodic_max) {
+                       fusbh200_info(fusbh200,
+                               "cannot decrease uframe_periodic_max because "
+                               "periodic bandwidth is already allocated "
+                               "(%u > %u)\n",
+                               allocated_max, uframe_periodic_max);
+                       goto out_unlock;
+               }
+       }
+
+       /* increasing is always ok */
+
+       fusbh200_info(fusbh200, "setting max periodic bandwidth to %u%% "
+                       "(== %u usec/uframe)\n",
+                       100*uframe_periodic_max/125, uframe_periodic_max);
+
+       if (uframe_periodic_max != 100)
+               fusbh200_warn(fusbh200, "max periodic bandwidth set is non-standard\n");
+
+       fusbh200->uframe_periodic_max = uframe_periodic_max;
+       ret = count;
+
+out_unlock:
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+       return ret;
+}
+static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max);
+
+
+static inline int create_sysfs_files(struct fusbh200_hcd *fusbh200)
+{
+       struct device   *controller = fusbh200_to_hcd(fusbh200)->self.controller;
+       int     i = 0;
+
+       if (i)
+               goto out;
+
+       i = device_create_file(controller, &dev_attr_uframe_periodic_max);
+out:
+       return i;
+}
+
+static inline void remove_sysfs_files(struct fusbh200_hcd *fusbh200)
+{
+       struct device   *controller = fusbh200_to_hcd(fusbh200)->self.controller;
+
+       device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
+/*-------------------------------------------------------------------------*/
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void fusbh200_turn_off_all_ports(struct fusbh200_hcd *fusbh200)
+{
+       u32 __iomem *status_reg = &fusbh200->regs->port_status;
+
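+       /* PORT_RWC_BITS are the write-one-to-clear change bits; writing only
+        * those clears them and writes zero to every other writable PORTSC
+        * bit, which disables the (single) port and its wakeup sources.
+        */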
+       fusbh200_writel(fusbh200, PORT_RWC_BITS, status_reg);
+}
+
+/*
+ * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fusbh200_silence_controller(struct fusbh200_hcd *fusbh200)
+{
+       fusbh200_halt(fusbh200);
+
+       spin_lock_irq(&fusbh200->lock);
+       fusbh200->rh_state = FUSBH200_RH_HALTED;
+       fusbh200_turn_off_all_ports(fusbh200);
+       spin_unlock_irq(&fusbh200->lock);
+}
+
+/* fusbh200_shutdown kicks in for silicon on any bus (not just PCI, etc).
+ * This forcibly disables DMA and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void fusbh200_shutdown(struct usb_hcd *hcd)
+{
+       struct fusbh200_hcd     *fusbh200 = hcd_to_fusbh200(hcd);
+
+       spin_lock_irq(&fusbh200->lock);
+       fusbh200->shutdown = true;
+       fusbh200->rh_state = FUSBH200_RH_STOPPING;
+       fusbh200->enabled_hrtimer_events = 0;
+       spin_unlock_irq(&fusbh200->lock);
+
+       fusbh200_silence_controller(fusbh200);
+
+       hrtimer_cancel(&fusbh200->hrtimer);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * fusbh200_work is called from some interrupts, timers, and so on.
+ * it calls driver completion functions, after dropping fusbh200->lock.
+ */
+static void fusbh200_work (struct fusbh200_hcd *fusbh200)
+{
+       /* another CPU may drop fusbh200->lock during a schedule scan while
+        * it reports urb completions.  this flag guards against bogus
+        * attempts at re-entrant schedule scanning.
+        */
+       if (fusbh200->scanning) {
+               fusbh200->need_rescan = true;
+               return;
+       }
+       fusbh200->scanning = true;
+
+ rescan:
+       fusbh200->need_rescan = false;
+       if (fusbh200->async_count)
+               scan_async(fusbh200);
+       if (fusbh200->intr_count > 0)
+               scan_intr(fusbh200);
+       if (fusbh200->isoc_count > 0)
+               scan_isoc(fusbh200);
+       if (fusbh200->need_rescan)
+               goto rescan;
+       fusbh200->scanning = false;
+
+       /* the IO watchdog guards against hardware or driver bugs that
+        * misplace IRQs, and should let us run completely without IRQs.
+        * such lossage has been observed on both VT6202 and VT8235.
+        */
+       turn_on_io_watchdog(fusbh200);
+}
+
+/*
+ * Called when the fusbh200_hcd module is removed.
+ */
+static void fusbh200_stop (struct usb_hcd *hcd)
+{
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200 (hcd);
+
+       fusbh200_dbg (fusbh200, "stop\n");
+
+       /* no more interrupts ... */
+
+       spin_lock_irq(&fusbh200->lock);
+       fusbh200->enabled_hrtimer_events = 0;
+       spin_unlock_irq(&fusbh200->lock);
+
+       fusbh200_quiesce(fusbh200);
+       fusbh200_silence_controller(fusbh200);
+       fusbh200_reset (fusbh200);
+
+       hrtimer_cancel(&fusbh200->hrtimer);
+       remove_sysfs_files(fusbh200);
+       remove_debug_files (fusbh200);
+
+       /* root hub is shut down separately (first, when possible) */
+       spin_lock_irq (&fusbh200->lock);
+       end_free_itds(fusbh200);
+       spin_unlock_irq (&fusbh200->lock);
+       fusbh200_mem_cleanup (fusbh200);
+
+#ifdef FUSBH200_STATS
+       fusbh200_dbg(fusbh200, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+               fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
+               fusbh200->stats.lost_iaa);
+       fusbh200_dbg (fusbh200, "complete %ld unlink %ld\n",
+               fusbh200->stats.complete, fusbh200->stats.unlink);
+#endif
+
+       dbg_status (fusbh200, "fusbh200_stop completed",
+                   fusbh200_readl(fusbh200, &fusbh200->regs->status));
+}
+
+/* one-time init, only for memory state */
+static int hcd_fusbh200_init(struct usb_hcd *hcd)
+{
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200(hcd);
+       u32                     temp;
+       int                     retval;
+       u32                     hcc_params;
+       struct fusbh200_qh_hw   *hw;
+
+       spin_lock_init(&fusbh200->lock);
+
+       /*
+        * keep the IO watchdog enabled by default; well-behaved HCDs can
+        * turn it off later
+        */
+       fusbh200->need_io_watchdog = 1;
+
+       hrtimer_init(&fusbh200->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       fusbh200->hrtimer.function = fusbh200_hrtimer_func;
+       fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
+
+       hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+
+       /*
+        * by default set standard 80% (== 100 usec/uframe) max periodic
+        * bandwidth as required by USB 2.0
+        */
+       fusbh200->uframe_periodic_max = 100;
+
+       /*
+        * hw default: 1K periodic list heads, one per frame.
+        * periodic_size can shrink by USBCMD update if hcc_params allows.
+        */
+       fusbh200->periodic_size = DEFAULT_I_TDPS;
+       INIT_LIST_HEAD(&fusbh200->intr_qh_list);
+       INIT_LIST_HEAD(&fusbh200->cached_itd_list);
+
+       if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+               /* periodic schedule size can be smaller than default */
+               switch (FUSBH200_TUNE_FLS) {
+               case 0: fusbh200->periodic_size = 1024; break;
+               case 1: fusbh200->periodic_size = 512; break;
+               case 2: fusbh200->periodic_size = 256; break;
+               default:        BUG();
+               }
+       }
+       if ((retval = fusbh200_mem_init(fusbh200, GFP_KERNEL)) < 0)
+               return retval;
+
+       /* controllers may cache some of the periodic schedule ... */
+       fusbh200->i_thresh = 2;
+
+       /*
+        * dedicate a qh for the async ring head, since we couldn't unlink
+        * a 'real' qh without stopping the async schedule [4.8].  use it
+        * as the 'reclamation list head' too.
+        * its dummy is used in hw_alt_next of many tds, to prevent the qh
+        * from automatically advancing to the next td after short reads.
+        */
+       fusbh200->async->qh_next.qh = NULL;
+       hw = fusbh200->async->hw;
+       hw->hw_next = QH_NEXT(fusbh200, fusbh200->async->qh_dma);
+       hw->hw_info1 = cpu_to_hc32(fusbh200, QH_HEAD);
+       hw->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
+       hw->hw_qtd_next = FUSBH200_LIST_END(fusbh200);
+       fusbh200->async->qh_state = QH_STATE_LINKED;
+       hw->hw_alt_next = QTD_NEXT(fusbh200, fusbh200->async->dummy->qtd_dma);
+
+       /* clear interrupt enables, set irq latency */
+       if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+               log2_irq_thresh = 0;
+       temp = 1 << (16 + log2_irq_thresh);
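+       /*
+        * Bits 23:16 of USBCMD hold the interrupt threshold in microframes;
+        * e.g. log2_irq_thresh == 3 gives 8 << 16, i.e. at most one interrupt
+        * every 8 microframes (1 msec).
+        */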
+       if (HCC_CANPARK(hcc_params)) {
+               /* HW default park == 3, on hardware that supports it (like
+                * NVidia and ALI silicon), maximizes throughput on the async
+                * schedule by avoiding QH fetches between transfers.
+                *
+                * With fast USB storage devices and NForce2, "park" seems to
+                * cause problems: throughput reduction (!), data errors...
+                */
+               if (park) {
+                       park = min(park, (unsigned) 3);
+                       temp |= CMD_PARK;
+                       temp |= park << 8;
+               }
+               fusbh200_dbg(fusbh200, "park %d\n", park);
+       }
+       if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+               /* periodic schedule size can be smaller than default */
+               temp &= ~(3 << 2);
+               temp |= (FUSBH200_TUNE_FLS << 2);
+       }
+       fusbh200->command = temp;
+
+       /* Accept arbitrarily long scatter-gather lists */
+       if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+               hcd->self.sg_tablesize = ~0;
+       return 0;
+}
+
+/* start HC running; it's halted, hcd_fusbh200_init() has been run (once) */
+static int fusbh200_run (struct usb_hcd *hcd)
+{
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200 (hcd);
+       u32                     temp;
+       u32                     hcc_params;
+
+       hcd->uses_new_polling = 1;
+
+       /* EHCI spec section 4.1 */
+
+       fusbh200_writel(fusbh200, fusbh200->periodic_dma, &fusbh200->regs->frame_list);
+       fusbh200_writel(fusbh200, (u32)fusbh200->async->qh_dma, &fusbh200->regs->async_next);
+
+       /*
+        * hcc_params controls whether fusbh200->regs->segment must (!!!)
+        * be used; it constrains QH/ITD/SITD and QTD locations.
+        * pci_pool consistent memory always uses segment zero.
+        * streaming mappings for I/O buffers, like pci_map_single(),
+        * can return segments above 4GB, if the device allows.
+        *
+        * NOTE:  the dma mask is visible through dma_supported(), so
+        * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+        * Scsi_Host.highmem_io, and so forth.  It's readonly to all
+        * host side drivers though.
+        */
+       hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
+
+       // Philips, Intel, and maybe others need CMD_RUN before the
+       // root hub will detect new devices (why?); NEC doesn't
+       fusbh200->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+       fusbh200->command |= CMD_RUN;
+       fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+       dbg_cmd (fusbh200, "init", fusbh200->command);
+
+       /*
+        * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+        * are explicitly handed to companion controller(s), so no TT is
+        * involved with the root hub.  (Except where one is integrated,
+        * and there's no companion controller unless maybe for USB OTG.)
+        *
+        * Turning on the CF flag will transfer ownership of all ports
+        * from the companions to the EHCI controller.  If any of the
+        * companions are in the middle of a port reset at the time, it
+        * could cause trouble.  Write-locking ehci_cf_port_reset_rwsem
+        * guarantees that no resets are in progress.  After we set CF,
+        * a short delay lets the hardware catch up; new resets shouldn't
+        * be started before the port switching actions could complete.
+        */
+       down_write(&ehci_cf_port_reset_rwsem);
+       fusbh200->rh_state = FUSBH200_RH_RUNNING;
+       fusbh200_readl(fusbh200, &fusbh200->regs->command);     /* unblock posted writes */
+       msleep(5);
+       up_write(&ehci_cf_port_reset_rwsem);
+       fusbh200->last_periodic_enable = ktime_get_real();
+
+       temp = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
+       fusbh200_info (fusbh200,
+               "USB %x.%x started, EHCI %x.%02x\n",
+               ((fusbh200->sbrn & 0xf0)>>4), (fusbh200->sbrn & 0x0f),
+               temp >> 8, temp & 0xff);
+
+       fusbh200_writel(fusbh200, INTR_MASK,
+                   &fusbh200->regs->intr_enable); /* Turn On Interrupts */
+
+       /* GRR this is run-once init(), being done every time the HC starts.
+        * So long as they're part of class devices, we can't do it in init()
+        * since the class device isn't created that early.
+        */
+       create_debug_files(fusbh200);
+       create_sysfs_files(fusbh200);
+
+       return 0;
+}
+
+static int fusbh200_setup(struct usb_hcd *hcd)
+{
+       struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
+       int retval;
+
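+       /* the operational registers start HC_LENGTH (CAPLENGTH) bytes past
+        * the capability registers mapped at fusbh200->caps
+        */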
+       fusbh200->regs = (void __iomem *)fusbh200->caps +
+           HC_LENGTH(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
+       dbg_hcs_params(fusbh200, "reset");
+       dbg_hcc_params(fusbh200, "reset");
+
+       /* cache this readonly data; minimize chip reads */
+       fusbh200->hcs_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
+
+       fusbh200->sbrn = HCD_USB2;
+
+       /* data structure init */
+       retval = hcd_fusbh200_init(hcd);
+       if (retval)
+               return retval;
+
+       retval = fusbh200_halt(fusbh200);
+       if (retval)
+               return retval;
+
+       fusbh200_reset(fusbh200);
+
+       return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static irqreturn_t fusbh200_irq (struct usb_hcd *hcd)
+{
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200 (hcd);
+       u32                     status, masked_status, pcd_status = 0, cmd;
+       int                     bh;
+
+       spin_lock (&fusbh200->lock);
+
+       status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
+
+       /* e.g. cardbus physical eject */
+       if (status == ~(u32) 0) {
+               fusbh200_dbg (fusbh200, "device removed\n");
+               goto dead;
+       }
+
+       /*
+        * We don't use STS_FLR, but some controllers don't like it to
+        * remain on, so mask it out along with the other status bits.
+        */
+       masked_status = status & (INTR_MASK | STS_FLR);
+
+       /* Shared IRQ? */
+       if (!masked_status || unlikely(fusbh200->rh_state == FUSBH200_RH_HALTED)) {
+               spin_unlock(&fusbh200->lock);
+               return IRQ_NONE;
+       }
+
+       /* clear (just) interrupts */
+       fusbh200_writel(fusbh200, masked_status, &fusbh200->regs->status);
+       cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
+       bh = 0;
+
+#ifdef VERBOSE_DEBUG
+       /* unrequested/ignored: Frame List Rollover */
+       dbg_status (fusbh200, "irq", status);
+#endif
+
+       /* INT, ERR, and IAA interrupt rates can be throttled */
+
+       /* normal [4.15.1.2] or error [4.15.1.1] completion */
+       if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
+               if (likely ((status & STS_ERR) == 0))
+                       COUNT (fusbh200->stats.normal);
+               else
+                       COUNT (fusbh200->stats.error);
+               bh = 1;
+       }
+
+       /* complete the unlinking of some qh [4.15.2.3] */
+       if (status & STS_IAA) {
+
+               /* Turn off the IAA watchdog */
+               fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_IAA_WATCHDOG);
+
+               /*
+                * Mild optimization: Allow another IAAD to reset the
+                * hrtimer, if one occurs before the next expiration.
+                * In theory we could always cancel the hrtimer, but
+                * tests show that about half the time it will be reset
+                * for some other event anyway.
+                */
+               if (fusbh200->next_hrtimer_event == FUSBH200_HRTIMER_IAA_WATCHDOG)
+                       ++fusbh200->next_hrtimer_event;
+
+               /* guard against (alleged) silicon errata */
+               if (cmd & CMD_IAAD)
+                       fusbh200_dbg(fusbh200, "IAA with IAAD still set?\n");
+               if (fusbh200->async_iaa) {
+                       COUNT(fusbh200->stats.iaa);
+                       end_unlink_async(fusbh200);
+               } else
+                       fusbh200_dbg(fusbh200, "IAA with nothing unlinked?\n");
+       }
+
+       /* remote wakeup [4.3.1] */
+       if (status & STS_PCD) {
+               int pstatus;
+               u32 __iomem *status_reg = &fusbh200->regs->port_status;
+
+               /* kick root hub later */
+               pcd_status = status;
+
+               /* resume root hub? */
+               if (fusbh200->rh_state == FUSBH200_RH_SUSPENDED)
+                       usb_hcd_resume_root_hub(hcd);
+
+               pstatus = fusbh200_readl(fusbh200, status_reg);
+
+               if (test_bit(0, &fusbh200->suspended_ports) &&
+                               ((pstatus & PORT_RESUME) ||
+                                       !(pstatus & PORT_SUSPEND)) &&
+                               (pstatus & PORT_PE) &&
+                               fusbh200->reset_done[0] == 0) {
+
+                       /* start 20 msec resume signaling from this port,
+                        * and make khubd collect PORT_STAT_C_SUSPEND to
+                        * stop that signaling.  Use 5 ms extra for safety,
+                        * like usb_port_resume() does.
+                        */
+                       fusbh200->reset_done[0] = jiffies + msecs_to_jiffies(25);
+                       set_bit(0, &fusbh200->resuming_ports);
+                       fusbh200_dbg (fusbh200, "port 1 remote wakeup\n");
+                       mod_timer(&hcd->rh_timer, fusbh200->reset_done[0]);
+               }
+       }
+
+       /* PCI errors [4.15.2.4] */
+       if (unlikely ((status & STS_FATAL) != 0)) {
+               fusbh200_err(fusbh200, "fatal error\n");
+               dbg_cmd(fusbh200, "fatal", cmd);
+               dbg_status(fusbh200, "fatal", status);
+dead:
+               usb_hc_died(hcd);
+
+               /* Don't let the controller do anything more */
+               fusbh200->shutdown = true;
+               fusbh200->rh_state = FUSBH200_RH_STOPPING;
+               fusbh200->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+               fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
+               fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
+               fusbh200_handle_controller_death(fusbh200);
+
+               /* Handle completions when the controller stops */
+               bh = 0;
+       }
+
+       if (bh)
+               fusbh200_work (fusbh200);
+       spin_unlock (&fusbh200->lock);
+       if (pcd_status)
+               usb_hcd_poll_rh_status(hcd);
+       return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE:  control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int fusbh200_urb_enqueue (
+       struct usb_hcd  *hcd,
+       struct urb      *urb,
+       gfp_t           mem_flags
+) {
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200 (hcd);
+       struct list_head        qtd_list;
+
+       INIT_LIST_HEAD (&qtd_list);
+
+       switch (usb_pipetype (urb->pipe)) {
+       case PIPE_CONTROL:
+               /* qh_completions() code doesn't handle all the fault cases
+                * in multi-TD control transfers.  Even 1KB is rare anyway.
+                */
+               if (urb->transfer_buffer_length > (16 * 1024))
+                       return -EMSGSIZE;
+               /* FALLTHROUGH */
+       /* case PIPE_BULK: */
+       default:
+               if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
+                       return -ENOMEM;
+               return submit_async(fusbh200, urb, &qtd_list, mem_flags);
+
+       case PIPE_INTERRUPT:
+               if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
+                       return -ENOMEM;
+               return intr_submit(fusbh200, urb, &qtd_list, mem_flags);
+
+       case PIPE_ISOCHRONOUS:
+               return itd_submit (fusbh200, urb, mem_flags);
+       }
+}
+
+/* remove from hardware lists
+ * completions normally happen asynchronously
+ */
+
+static int fusbh200_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200 (hcd);
+       struct fusbh200_qh              *qh;
+       unsigned long           flags;
+       int                     rc;
+
+       spin_lock_irqsave (&fusbh200->lock, flags);
+       rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+       if (rc)
+               goto done;
+
+       switch (usb_pipetype (urb->pipe)) {
+       // case PIPE_CONTROL:
+       // case PIPE_BULK:
+       default:
+               qh = (struct fusbh200_qh *) urb->hcpriv;
+               if (!qh)
+                       break;
+               switch (qh->qh_state) {
+               case QH_STATE_LINKED:
+               case QH_STATE_COMPLETING:
+                       start_unlink_async(fusbh200, qh);
+                       break;
+               case QH_STATE_UNLINK:
+               case QH_STATE_UNLINK_WAIT:
+                       /* already started */
+                       break;
+               case QH_STATE_IDLE:
+                       /* QH might be waiting for a Clear-TT-Buffer */
+                       qh_completions(fusbh200, qh);
+                       break;
+               }
+               break;
+
+       case PIPE_INTERRUPT:
+               qh = (struct fusbh200_qh *) urb->hcpriv;
+               if (!qh)
+                       break;
+               switch (qh->qh_state) {
+               case QH_STATE_LINKED:
+               case QH_STATE_COMPLETING:
+                       start_unlink_intr(fusbh200, qh);
+                       break;
+               case QH_STATE_IDLE:
+                       qh_completions (fusbh200, qh);
+                       break;
+               default:
+                       fusbh200_dbg (fusbh200, "bogus qh %p state %d\n",
+                                       qh, qh->qh_state);
+                       goto done;
+               }
+               break;
+
+       case PIPE_ISOCHRONOUS:
+               // itd...
+
+               // wait till next completion, do it then.
+               // completion irqs can wait up to 1024 msec.
+               break;
+       }
+done:
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+       return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+// bulk qh holds the data toggle
+
+static void
+fusbh200_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200 (hcd);
+       unsigned long           flags;
+       struct fusbh200_qh              *qh, *tmp;
+
+       /* ASSERT:  any requests/urbs are being unlinked */
+       /* ASSERT:  nobody can be submitting urbs for this any more */
+
+rescan:
+       spin_lock_irqsave (&fusbh200->lock, flags);
+       qh = ep->hcpriv;
+       if (!qh)
+               goto done;
+
+       /* endpoints can be iso streams.  for now, we don't
+        * accelerate iso completions ... so spin a while.
+        */
+       if (qh->hw == NULL) {
+               struct fusbh200_iso_stream      *stream = ep->hcpriv;
+
+               if (!list_empty(&stream->td_list))
+                       goto idle_timeout;
+
+               /* BUG_ON(!list_empty(&stream->free_list)); */
+               kfree(stream);
+               goto done;
+       }
+
+       if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
+               qh->qh_state = QH_STATE_IDLE;
+       switch (qh->qh_state) {
+       case QH_STATE_LINKED:
+       case QH_STATE_COMPLETING:
+               for (tmp = fusbh200->async->qh_next.qh;
+                               tmp && tmp != qh;
+                               tmp = tmp->qh_next.qh)
+                       continue;
+               /* periodic qh self-unlinks on empty, and a COMPLETING qh
+                * may already be unlinked.
+                */
+               if (tmp)
+                       start_unlink_async(fusbh200, qh);
+               /* FALL THROUGH */
+       case QH_STATE_UNLINK:           /* wait for hw to finish? */
+       case QH_STATE_UNLINK_WAIT:
+idle_timeout:
+               spin_unlock_irqrestore (&fusbh200->lock, flags);
+               schedule_timeout_uninterruptible(1);
+               goto rescan;
+       case QH_STATE_IDLE:             /* fully unlinked */
+               if (qh->clearing_tt)
+                       goto idle_timeout;
+               if (list_empty (&qh->qtd_list)) {
+                       qh_destroy(fusbh200, qh);
+                       break;
+               }
+               /* else FALL THROUGH */
+       default:
+               /* caller was supposed to have unlinked any requests;
+                * that's not our job.  just leak this memory.
+                */
+               fusbh200_err (fusbh200, "qh %p (#%02x) state %d%s\n",
+                       qh, ep->desc.bEndpointAddress, qh->qh_state,
+                       list_empty (&qh->qtd_list) ? "" : "(has tds)");
+               break;
+       }
+ done:
+       ep->hcpriv = NULL;
+       spin_unlock_irqrestore (&fusbh200->lock, flags);
+}
+
+static void
+fusbh200_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200(hcd);
+       struct fusbh200_qh              *qh;
+       int                     eptype = usb_endpoint_type(&ep->desc);
+       int                     epnum = usb_endpoint_num(&ep->desc);
+       int                     is_out = usb_endpoint_dir_out(&ep->desc);
+       unsigned long           flags;
+
+       if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+               return;
+
+       spin_lock_irqsave(&fusbh200->lock, flags);
+       qh = ep->hcpriv;
+
+       /* For Bulk and Interrupt endpoints we maintain the toggle state
+        * in the hardware; the toggle bits in udev aren't used at all.
+        * When an endpoint is reset by usb_clear_halt() we must reset
+        * the toggle bit in the QH.
+        */
+       if (qh) {
+               usb_settoggle(qh->dev, epnum, is_out, 0);
+               if (!list_empty(&qh->qtd_list)) {
+                       WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+               } else if (qh->qh_state == QH_STATE_LINKED ||
+                               qh->qh_state == QH_STATE_COMPLETING) {
+
+                       /* The toggle value in the QH can't be updated
+                        * while the QH is active.  Unlink it now;
+                        * re-linking will call qh_refresh().
+                        */
+                       if (eptype == USB_ENDPOINT_XFER_BULK)
+                               start_unlink_async(fusbh200, qh);
+                       else
+                               start_unlink_intr(fusbh200, qh);
+               }
+       }
+       spin_unlock_irqrestore(&fusbh200->lock, flags);
+}
+
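+/* Backs the hc_driver .get_frame_number op: FRINDEX counts microframes, so
+ * shift right by 3 and wrap to the periodic schedule size.
+ */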
+static int fusbh200_get_frame (struct usb_hcd *hcd)
+{
+       struct fusbh200_hcd             *fusbh200 = hcd_to_fusbh200 (hcd);
+       return (fusbh200_read_frame_index(fusbh200) >> 3) % fusbh200->periodic_size;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The EHCI in ChipIdea HDRC cannot be a separate module or device,
+ * because its registers (and irq) are shared between host/gadget/otg
+ * functions and in order to facilitate role switching we cannot
+ * give the fusbh200 driver exclusive access to those.
+ */
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR (DRIVER_AUTHOR);
+MODULE_LICENSE ("GPL");
+
+static const struct hc_driver fusbh200_fusbh200_hc_driver = {
+       .description            = hcd_name,
+       .product_desc           = "Faraday USB2.0 Host Controller",
+       .hcd_priv_size          = sizeof(struct fusbh200_hcd),
+
+       /*
+        * generic hardware linkage
+        */
+       .irq                    = fusbh200_irq,
+       .flags                  = HCD_MEMORY | HCD_USB2,
+
+       /*
+        * basic lifecycle operations
+        */
+       .reset                  = hcd_fusbh200_init,
+       .start                  = fusbh200_run,
+       .stop                   = fusbh200_stop,
+       .shutdown               = fusbh200_shutdown,
+
+       /*
+        * managing i/o requests and associated device resources
+        */
+       .urb_enqueue            = fusbh200_urb_enqueue,
+       .urb_dequeue            = fusbh200_urb_dequeue,
+       .endpoint_disable       = fusbh200_endpoint_disable,
+       .endpoint_reset         = fusbh200_endpoint_reset,
+
+       /*
+        * scheduling support
+        */
+       .get_frame_number       = fusbh200_get_frame,
+
+       /*
+        * root hub support
+        */
+       .hub_status_data        = fusbh200_hub_status_data,
+       .hub_control            = fusbh200_hub_control,
+       .bus_suspend            = fusbh200_bus_suspend,
+       .bus_resume             = fusbh200_bus_resume,
+
+       .relinquish_port        = fusbh200_relinquish_port,
+       .port_handed_over       = fusbh200_port_handed_over,
+
+       .clear_tt_buffer_complete = fusbh200_clear_tt_buffer_complete,
+};
+
+static void fusbh200_init(struct fusbh200_hcd *fusbh200)
+{
+       u32 reg;
+
+       reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmcsr);
+       reg |= BMCSR_INT_POLARITY;
+       reg &= ~BMCSR_VBUS_OFF;
+       fusbh200_writel(fusbh200, reg, &fusbh200->regs->bmcsr);
+
+       reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmier);
+       fusbh200_writel(fusbh200, reg | BMIER_OVC_EN | BMIER_VBUS_ERR_EN,
+               &fusbh200->regs->bmier);
+}
+
+/**
+ * fusbh200_hcd_probe - initialize faraday FUSBH200 HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int fusbh200_hcd_probe(struct platform_device *pdev)
+{
+       struct device                   *dev = &pdev->dev;
+       struct usb_hcd                  *hcd;
+       struct resource                 *res;
+       int                             irq;
+       int                             retval = -ENODEV;
+       struct fusbh200_hcd             *fusbh200;
+
+       if (usb_disabled())
+               return -ENODEV;
+
+       pdev->dev.power.power_state = PMSG_ON;
+
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res) {
+               dev_err(dev,
+                       "Found HC with no IRQ. Check %s setup!\n",
+                       dev_name(dev));
+               return -ENODEV;
+       }
+
+       irq = res->start;
+
+       hcd = usb_create_hcd(&fusbh200_fusbh200_hc_driver, dev,
+                       dev_name(dev));
+       if (!hcd) {
+               dev_err(dev, "failed to create hcd with err %d\n", retval);
+               retval = -ENOMEM;
+               goto fail_create_hcd;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev,
+                       "Found HC with no register addr. Check %s setup!\n",
+                       dev_name(dev));
+               retval = -ENODEV;
+               goto fail_request_resource;
+       }
+
+       hcd->rsrc_start = res->start;
+       hcd->rsrc_len = resource_size(res);
+       hcd->has_tt = 1;
+
+       if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+                               fusbh200_fusbh200_hc_driver.description)) {
+               dev_dbg(dev, "controller already in use\n");
+               retval = -EBUSY;
+               goto fail_request_resource;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+       if (!res) {
+               dev_err(dev,
+                       "Found HC with no register addr. Check %s setup!\n",
+                       dev_name(dev));
+               retval = -ENODEV;
+               goto fail_request_resource;
+       }
+
+       hcd->regs = ioremap_nocache(res->start, resource_size(res));
+       if (hcd->regs == NULL) {
+               dev_dbg(dev, "error mapping memory\n");
+               retval = -EFAULT;
+               goto fail_ioremap;
+       }
+
+       fusbh200 = hcd_to_fusbh200(hcd);
+
+       fusbh200->caps = hcd->regs;
+
+       retval = fusbh200_setup(hcd);
+       if (retval)
+               goto fail_add_hcd;
+
+       fusbh200_init(fusbh200);
+
+       retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+       if (retval) {
+               dev_err(dev, "failed to add hcd with err %d\n", retval);
+               goto fail_add_hcd;
+       }
+
+       return retval;
+
+fail_add_hcd:
+       iounmap(hcd->regs);
+fail_ioremap:
+       release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+       usb_put_hcd(hcd);
+fail_create_hcd:
+       dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
+       return retval;
+}
+
+/**
+ * fusbh200_hcd_remove - shutdown processing for FUSBH200 HCDs
+ * @pdev: platform device of the USB Host Controller being removed
+ *
+ * Reverses the effect of fusbh200_hcd_probe(), first invoking
+ * the HCD's stop() method.  It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int fusbh200_hcd_remove(struct platform_device *pdev)
+{
+       struct device *dev      = &pdev->dev;
+       struct usb_hcd *hcd     = dev_get_drvdata(dev);
+
+       if (!hcd)
+               return 0;
+
+       usb_remove_hcd(hcd);
+       iounmap(hcd->regs);
+       release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+       usb_put_hcd(hcd);
+
+       return 0;
+}
+
+static struct platform_driver fusbh200_hcd_fusbh200_driver = {
+       .driver = {
+               .name   = "fusbh200",
+       },
+       .probe  = fusbh200_hcd_probe,
+       .remove = fusbh200_hcd_remove,
+};
+
+static int __init fusbh200_hcd_init(void)
+{
+       int retval = 0;
+
+       if (usb_disabled())
+               return -ENODEV;
+
+       printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
+       set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+       if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
+                       test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
+               printk(KERN_WARNING "Warning! fusbh200_hcd should always be loaded"
+                               " before uhci_hcd and ohci_hcd, not after\n");
+
+       pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n",
+                hcd_name,
+                sizeof(struct fusbh200_qh), sizeof(struct fusbh200_qtd),
+                sizeof(struct fusbh200_itd));
+
+#ifdef DEBUG
+       fusbh200_debug_root = debugfs_create_dir("fusbh200", usb_debug_root);
+       if (!fusbh200_debug_root) {
+               retval = -ENOENT;
+               goto err_debug;
+       }
+#endif
+
+       retval = platform_driver_register(&fusbh200_hcd_fusbh200_driver);
+       if (retval < 0)
+               goto clean;
+       return retval;
+
+clean:
+#ifdef DEBUG
+       debugfs_remove(fusbh200_debug_root);
+       fusbh200_debug_root = NULL;
+err_debug:
+#endif
+       clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+       return retval;
+}
+module_init(fusbh200_hcd_init);
+
+static void __exit fusbh200_hcd_cleanup(void)
+{
+       platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
+#ifdef DEBUG
+       debugfs_remove(fusbh200_debug_root);
+#endif
+       clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(fusbh200_hcd_cleanup);
diff --git a/drivers/usb/host/fusbh200.h b/drivers/usb/host/fusbh200.h
new file mode 100644 (file)
index 0000000..797c9e8
--- /dev/null
@@ -0,0 +1,743 @@
+#ifndef __LINUX_FUSBH200_H
+#define __LINUX_FUSBH200_H
+
+/* definitions used for the EHCI driver */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given FUSBH200_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#define __hc32 __le32
+#define __hc16 __le16
+
+/* statistics can be kept for tuning/monitoring */
+struct fusbh200_stats {
+       /* irq usage */
+       unsigned long           normal;
+       unsigned long           error;
+       unsigned long           iaa;
+       unsigned long           lost_iaa;
+
+       /* termination of urbs from core */
+       unsigned long           complete;
+       unsigned long           unlink;
+};
+
+/* fusbh200_hcd->lock guards shared data against other CPUs:
+ *   fusbh200_hcd:     async, unlink, periodic (and shadow), ...
+ *   usb_host_endpoint: hcpriv
+ *   fusbh200_qh:      qh_next, qtd_list
+ *   fusbh200_qtd:     qtd_list
+ *
+ * Also, hold this lock when talking to HC registers or
+ * when updating hw_* fields in shared qh/qtd/... structures.
+ */
+
+#define        FUSBH200_MAX_ROOT_PORTS 1               /* see HCS_N_PORTS */
+
+/*
+ * fusbh200_rh_state values of FUSBH200_RH_RUNNING or above mean that the
+ * controller may be doing DMA.  Lower values mean there's no DMA.
+ */
+enum fusbh200_rh_state {
+       FUSBH200_RH_HALTED,
+       FUSBH200_RH_SUSPENDED,
+       FUSBH200_RH_RUNNING,
+       FUSBH200_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * ehci-timer.c) in parallel with this list.
+ */
+enum fusbh200_hrtimer_event {
+       FUSBH200_HRTIMER_POLL_ASS,              /* Poll for async schedule off */
+       FUSBH200_HRTIMER_POLL_PSS,              /* Poll for periodic schedule off */
+       FUSBH200_HRTIMER_POLL_DEAD,             /* Wait for dead controller to stop */
+       FUSBH200_HRTIMER_UNLINK_INTR,   /* Wait for interrupt QH unlink */
+       FUSBH200_HRTIMER_FREE_ITDS,             /* Wait for unused iTDs and siTDs */
+       FUSBH200_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
+       FUSBH200_HRTIMER_IAA_WATCHDOG,  /* Handle lost IAA interrupts */
+       FUSBH200_HRTIMER_DISABLE_PERIODIC,      /* Wait to disable periodic sched */
+       FUSBH200_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
+       FUSBH200_HRTIMER_IO_WATCHDOG,   /* Check for missing IRQs */
+       FUSBH200_HRTIMER_NUM_EVENTS             /* Must come last */
+};
+#define FUSBH200_HRTIMER_NO_EVENT      99
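+/* FUSBH200_HRTIMER_NO_EVENT is a sentinel larger than any real event index,
+ * meaning no hrtimer event is currently scheduled.
+ */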
+
+struct fusbh200_hcd {                  /* one per controller */
+       /* timing support */
+       enum fusbh200_hrtimer_event     next_hrtimer_event;
+       unsigned                enabled_hrtimer_events;
+       ktime_t                 hr_timeouts[FUSBH200_HRTIMER_NUM_EVENTS];
+       struct hrtimer          hrtimer;
+
+       int                     PSS_poll_count;
+       int                     ASS_poll_count;
+       int                     died_poll_count;
+
+       /* glue to PCI and HCD framework */
+       struct fusbh200_caps __iomem *caps;
+       struct fusbh200_regs __iomem *regs;
+       struct fusbh200_dbg_port __iomem *debug;
+
+       __u32                   hcs_params;     /* cached register copy */
+       spinlock_t              lock;
+       enum fusbh200_rh_state  rh_state;
+
+       /* general schedule support */
+       bool                    scanning:1;
+       bool                    need_rescan:1;
+       bool                    intr_unlinking:1;
+       bool                    async_unlinking:1;
+       bool                    shutdown:1;
+       struct fusbh200_qh              *qh_scan_next;
+
+       /* async schedule support */
+       struct fusbh200_qh              *async;
+       struct fusbh200_qh              *dummy;         /* For AMD quirk use */
+       struct fusbh200_qh              *async_unlink;
+       struct fusbh200_qh              *async_unlink_last;
+       struct fusbh200_qh              *async_iaa;
+       unsigned                async_unlink_cycle;
+       unsigned                async_count;    /* async activity count */
+
+       /* periodic schedule support */
+#define        DEFAULT_I_TDPS          1024            /* some HCs can do less */
+       unsigned                periodic_size;
+       __hc32                  *periodic;      /* hw periodic table */
+       dma_addr_t              periodic_dma;
+       struct list_head        intr_qh_list;
+       unsigned                i_thresh;       /* uframes HC might cache */
+
+       union fusbh200_shadow   *pshadow;       /* mirror hw periodic table */
+       struct fusbh200_qh              *intr_unlink;
+       struct fusbh200_qh              *intr_unlink_last;
+       unsigned                intr_unlink_cycle;
+       unsigned                now_frame;      /* frame from HC hardware */
+       unsigned                next_frame;     /* scan periodic, start here */
+       unsigned                intr_count;     /* intr activity count */
+       unsigned                isoc_count;     /* isoc activity count */
+       unsigned                periodic_count; /* periodic activity count */
+       unsigned                uframe_periodic_max; /* max periodic time per uframe */
+
+
+       /* list of itds completed while now_frame was still active */
+       struct list_head        cached_itd_list;
+       struct fusbh200_itd     *last_itd_to_free;
+
+       /* per root hub port */
+       unsigned long           reset_done [FUSBH200_MAX_ROOT_PORTS];
+
+       /* bit vectors (one bit per port) */
+       unsigned long           bus_suspended;          /* which ports were
+                       already suspended at the start of a bus suspend */
+       unsigned long           companion_ports;        /* which ports are
+                       dedicated to the companion controller */
+       unsigned long           owned_ports;            /* which ports are
+                       owned by the companion during a bus suspend */
+       unsigned long           port_c_suspend;         /* which ports have
+                       the change-suspend feature turned on */
+       unsigned long           suspended_ports;        /* which ports are
+                       suspended */
+       unsigned long           resuming_ports;         /* which ports have
+                       started to resume */
+
+       /* per-HC memory pools (could be per-bus, but ...) */
+       struct dma_pool         *qh_pool;       /* qh per active urb */
+       struct dma_pool         *qtd_pool;      /* one or more per qh */
+       struct dma_pool         *itd_pool;      /* itd per iso urb */
+
+       unsigned                random_frame;
+       unsigned long           next_statechange;
+       ktime_t                 last_periodic_enable;
+       u32                     command;
+
+       /* SILICON QUIRKS */
+       unsigned                need_io_watchdog:1;
+       unsigned                fs_i_thresh:1;  /* Intel iso scheduling */
+
+       u8                      sbrn;           /* packed release number */
+
+       /* irq statistics */
+#ifdef FUSBH200_STATS
+       struct fusbh200_stats   stats;
+#      define COUNT(x) do { (x)++; } while (0)
+#else
+#      define COUNT(x) do {} while (0)
+#endif
+
+       /* debug files */
+#ifdef DEBUG
+       struct dentry           *debug_dir;
+#endif
+};
+
+/* convert between an HCD pointer and the corresponding FUSBH200_HCD */
+static inline struct fusbh200_hcd *hcd_to_fusbh200 (struct usb_hcd *hcd)
+{
+       return (struct fusbh200_hcd *) (hcd->hcd_priv);
+}
+static inline struct usb_hcd *fusbh200_to_hcd (struct fusbh200_hcd *fusbh200)
+{
+       return container_of ((void *) fusbh200, struct usb_hcd, hcd_priv);
+}
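+/* These conversions rely on hcd_priv_size == sizeof(struct fusbh200_hcd) in
+ * the hc_driver: usb_create_hcd() allocates that much private storage at
+ * hcd->hcd_priv, which is where the fusbh200_hcd lives.
+ */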
+
+/*-------------------------------------------------------------------------*/
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct fusbh200_caps {
+       /* these fields are specified as 8 and 16 bit registers,
+        * but some hosts can't perform 8 or 16 bit PCI accesses.
+        * some hosts treat caplength and hciversion as parts of a 32-bit
+        * register, others treat them as two separate registers; this
+        * affects the memory map for big-endian controllers.
+        */
+       u32             hc_capbase;
+#define HC_LENGTH(fusbh200, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
+                               (fusbh200_big_endian_capbase(fusbh200) ? 24 : 0)))
+#define HC_VERSION(fusbh200, p)        (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
+                               (fusbh200_big_endian_capbase(fusbh200) ? 0 : 16)))
+       u32             hcs_params;     /* HCSPARAMS - offset 0x4 */
+#define HCS_N_PORTS(p)         (((p)>>0)&0xf)  /* bits 3:0, ports on HC */
+
+       u32             hcc_params;      /* HCCPARAMS - offset 0x8 */
+#define HCC_CANPARK(p)         ((p)&(1 << 2))  /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
+       u8              portroute[8];    /* nibbles for routing - offset 0xC */
+};
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct fusbh200_regs {
+
+       /* USBCMD: offset 0x00 */
+       u32             command;
+
+/* EHCI 1.1 addendum */
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK       (1<<11)         /* enable "park" on async qh */
+#define CMD_PARK_CNT(c)        (((c)>>8)&3)    /* how many transfers to park for */
+#define CMD_IAAD       (1<<6)          /* "doorbell" interrupt async advance */
+#define CMD_ASE                (1<<5)          /* async schedule enable */
+#define CMD_PSE                (1<<4)          /* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET      (1<<1)          /* reset HC not bus */
+#define CMD_RUN                (1<<0)          /* start/stop HC */
+
+       /* USBSTS: offset 0x04 */
+       u32             status;
+#define STS_ASS                (1<<15)         /* Async Schedule Status */
+#define STS_PSS                (1<<14)         /* Periodic Schedule Status */
+#define STS_RECL       (1<<13)         /* Reclamation */
+#define STS_HALT       (1<<12)         /* Not running (any reason) */
+/* some bits reserved */
+       /* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA                (1<<5)          /* Interrupted on async advance */
+#define STS_FATAL      (1<<4)          /* such as some PCI access errors */
+#define STS_FLR                (1<<3)          /* frame list rolled over */
+#define STS_PCD                (1<<2)          /* port change detect */
+#define STS_ERR                (1<<1)          /* "error" completion (overflow, ...) */
+#define STS_INT                (1<<0)          /* "normal" completion (short, ...) */
+
+       /* USBINTR: offset 0x08 */
+       u32             intr_enable;
+
+       /* FRINDEX: offset 0x0C */
+       u32             frame_index;    /* current microframe number */
+       /* CTRLDSSEGMENT: offset 0x10 */
+       u32             segment;        /* address bits 63:32 if needed */
+       /* PERIODICLISTBASE: offset 0x14 */
+       u32             frame_list;     /* points to periodic list */
+       /* ASYNCLISTADDR: offset 0x18 */
+       u32             async_next;     /* address of next async queue head */
+
+       u32     reserved1;
+       /* PORTSC: offset 0x20 */
+       u32     port_status;
+/* 31:23 reserved */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10))       /* USB 1.1 device */
+#define PORT_RESET     (1<<8)          /* reset port */
+#define PORT_SUSPEND   (1<<7)          /* suspend port */
+#define PORT_RESUME    (1<<6)          /* resume it */
+#define PORT_PEC       (1<<3)          /* port enable change */
+#define PORT_PE                (1<<2)          /* port enable */
+#define PORT_CSC       (1<<1)          /* connect status change */
+#define PORT_CONNECT   (1<<0)          /* device connected */
+#define PORT_RWC_BITS   (PORT_CSC | PORT_PEC)
+
+       u32     reserved2[3];
+
+       /* BMCSR: offset 0x30 */
+       u32     bmcsr; /* Bus Monitor Control/Status Register */
+#define BMCSR_HOST_SPD_TYP     (3<<9)
+#define BMCSR_VBUS_OFF         (1<<4)
+#define BMCSR_INT_POLARITY     (1<<3)
+
+       /* BMISR: offset 0x34 */
+       u32     bmisr; /* Bus Monitor Interrupt Status Register */
+#define BMISR_OVC              (1<<1)
+
+       /* BMIER: offset 0x38 */
+       u32     bmier; /* Bus Monitor Interrupt Enable Register */
+#define BMIER_OVC_EN           (1<<1)
+#define BMIER_VBUS_ERR_EN      (1<<0)
+};
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console.  (nonstandard enumeration.)
+ */
+struct fusbh200_dbg_port {
+       u32     control;
+#define DBGP_OWNER     (1<<30)
+#define DBGP_ENABLED   (1<<28)
+#define DBGP_DONE      (1<<16)
+#define DBGP_INUSE     (1<<10)
+#define DBGP_ERRCODE(x)        (((x)>>7)&0x07)
+#      define DBGP_ERR_BAD     1
+#      define DBGP_ERR_SIGNAL  2
+#define DBGP_ERROR     (1<<6)
+#define DBGP_GO                (1<<5)
+#define DBGP_OUT       (1<<4)
+#define DBGP_LEN(x)    (((x)>>0)&0x0f)
+       u32     pids;
+#define DBGP_PID_GET(x)                (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok)        (((data)<<8)|(tok))
+       u32     data03;
+       u32     data47;
+       u32     address;
+#define DBGP_EPADDR(dev, ep)   (((dev)<<8)|(ep))
+};
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+#include <linux/init.h>
+extern int __init early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+struct usb_hcd;
+
+static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
+{
+       return 1; /* Shouldn't this be 0? */
+}
+
+static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
+{
+       return -1;
+}
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from fusbh200 host driver to fusbh200 debug driver */
+extern int dbgp_external_startup(struct usb_hcd *);
+extern int dbgp_reset_prep(struct usb_hcd *hcd);
+#else
+static inline int dbgp_reset_prep(struct usb_hcd *hcd)
+{
+       return xen_dbgp_reset_prep(hcd);
+}
+static inline int dbgp_external_startup(struct usb_hcd *hcd)
+{
+       return xen_dbgp_external_startup(hcd);
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define        QTD_NEXT(fusbh200, dma) cpu_to_hc32(fusbh200, (u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct fusbh200_qtd {
+       /* first part defined by EHCI spec */
+       __hc32                  hw_next;        /* see EHCI 3.5.1 */
+       __hc32                  hw_alt_next;    /* see EHCI 3.5.2 */
+       __hc32                  hw_token;       /* see EHCI 3.5.3 */
+#define        QTD_TOGGLE      (1 << 31)       /* data toggle */
+#define        QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
+#define        QTD_IOC         (1 << 15)       /* interrupt on complete */
+#define        QTD_CERR(tok)   (((tok)>>10) & 0x3)
+#define        QTD_PID(tok)    (((tok)>>8) & 0x3)
+#define        QTD_STS_ACTIVE  (1 << 7)        /* HC may execute this */
+#define        QTD_STS_HALT    (1 << 6)        /* halted on error */
+#define        QTD_STS_DBE     (1 << 5)        /* data buffer error (in HC) */
+#define        QTD_STS_BABBLE  (1 << 4)        /* device was babbling (qtd halted) */
+#define        QTD_STS_XACT    (1 << 3)        /* device gave illegal response */
+#define        QTD_STS_MMF     (1 << 2)        /* incomplete split transaction */
+#define        QTD_STS_STS     (1 << 1)        /* split transaction state */
+#define        QTD_STS_PING    (1 << 0)        /* issue PING? */
+
+#define ACTIVE_BIT(fusbh200)   cpu_to_hc32(fusbh200, QTD_STS_ACTIVE)
+#define HALT_BIT(fusbh200)             cpu_to_hc32(fusbh200, QTD_STS_HALT)
+#define STATUS_BIT(fusbh200)   cpu_to_hc32(fusbh200, QTD_STS_STS)
+
+       __hc32                  hw_buf [5];        /* see EHCI 3.5.4 */
+       __hc32                  hw_buf_hi [5];        /* Appendix B */
+
+       /* the rest is HCD-private */
+       dma_addr_t              qtd_dma;                /* qtd address */
+       struct list_head        qtd_list;               /* sw qtd list */
+       struct urb              *urb;                   /* qtd's urb */
+       size_t                  length;                 /* length of buffer */
+} __attribute__ ((aligned (32)));
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK(fusbh200)     cpu_to_hc32 (fusbh200, ~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
+
+/*-------------------------------------------------------------------------*/
+
+/* type tag from {qh,itd,fstn}->hw_next */
+#define Q_NEXT_TYPE(fusbh200,dma)      ((dma) & cpu_to_hc32(fusbh200, 3 << 1))
+
+/*
+ * Now the following defines are not converted using the
+ * cpu_to_le32() macro anymore, since we have to support
+ * "dynamic" switching between be and le support, so that the driver
+ * can be used on one system with SoC EHCI controller using big-endian
+ * descriptors as well as a normal little-endian PCI EHCI controller.
+ */
+/* values for that type tag */
+#define Q_TYPE_ITD     (0 << 1)
+#define Q_TYPE_QH      (1 << 1)
+#define Q_TYPE_SITD    (2 << 1)
+#define Q_TYPE_FSTN    (3 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(fusbh200,dma)  (cpu_to_hc32(fusbh200, (((u32)dma)&~0x01f)|Q_TYPE_QH))
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define FUSBH200_LIST_END(fusbh200)    cpu_to_hc32(fusbh200, 1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure.  That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule.  Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union fusbh200_shadow {
+       struct fusbh200_qh      *qh;            /* Q_TYPE_QH */
+       struct fusbh200_itd     *itd;           /* Q_TYPE_ITD */
+       struct fusbh200_fstn    *fstn;          /* Q_TYPE_FSTN */
+       __hc32                  *hw_next;       /* (all types) */
+       void                    *ptr;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+/* first part defined by EHCI spec */
+struct fusbh200_qh_hw {
+       __hc32                  hw_next;        /* see EHCI 3.6.1 */
+       __hc32                  hw_info1;       /* see EHCI 3.6.2 */
+#define        QH_CONTROL_EP   (1 << 27)       /* FS/LS control endpoint */
+#define        QH_HEAD         (1 << 15)       /* Head of async reclamation list */
+#define        QH_TOGGLE_CTL   (1 << 14)       /* Data toggle control */
+#define        QH_HIGH_SPEED   (2 << 12)       /* Endpoint speed */
+#define        QH_LOW_SPEED    (1 << 12)
+#define        QH_FULL_SPEED   (0 << 12)
+#define        QH_INACTIVATE   (1 << 7)        /* Inactivate on next transaction */
+       __hc32                  hw_info2;        /* see EHCI 3.6.2 */
+#define        QH_SMASK        0x000000ff
+#define        QH_CMASK        0x0000ff00
+#define        QH_HUBADDR      0x007f0000
+#define        QH_HUBPORT      0x3f800000
+#define        QH_MULT         0xc0000000
+       __hc32                  hw_current;     /* qtd list - see EHCI 3.6.4 */
+
+       /* qtd overlay (hardware parts of a struct fusbh200_qtd) */
+       __hc32                  hw_qtd_next;
+       __hc32                  hw_alt_next;
+       __hc32                  hw_token;
+       __hc32                  hw_buf [5];
+       __hc32                  hw_buf_hi [5];
+} __attribute__ ((aligned(32)));
+
+struct fusbh200_qh {
+       struct fusbh200_qh_hw   *hw;            /* Must come first */
+       /* the rest is HCD-private */
+       dma_addr_t              qh_dma;         /* address of qh */
+       union fusbh200_shadow   qh_next;        /* ptr to qh; or periodic */
+       struct list_head        qtd_list;       /* sw qtd list */
+       struct list_head        intr_node;      /* list of intr QHs */
+       struct fusbh200_qtd             *dummy;
+       struct fusbh200_qh              *unlink_next;   /* next on unlink list */
+
+       unsigned                unlink_cycle;
+
+       u8                      needs_rescan;   /* Dequeue during giveback */
+       u8                      qh_state;
+#define        QH_STATE_LINKED         1               /* HC sees this */
+#define        QH_STATE_UNLINK         2               /* HC may still see this */
+#define        QH_STATE_IDLE           3               /* HC doesn't see this */
+#define        QH_STATE_UNLINK_WAIT    4               /* LINKED and on unlink q */
+#define        QH_STATE_COMPLETING     5               /* don't touch token.HALT */
+
+       u8                      xacterrs;       /* XactErr retry counter */
+#define        QH_XACTERR_MAX          32              /* XactErr retry limit */
+
+       /* periodic schedule info */
+       u8                      usecs;          /* intr bandwidth */
+       u8                      gap_uf;         /* uframes split/csplit gap */
+       u8                      c_usecs;        /* ... split completion bw */
+       u16                     tt_usecs;       /* tt downstream bandwidth */
+       unsigned short          period;         /* polling interval */
+       unsigned short          start;          /* where polling starts */
+#define NO_FRAME ((unsigned short)~0)                  /* pick new start */
+
+       struct usb_device       *dev;           /* access to TT */
+       unsigned                is_out:1;       /* bulk or intr OUT */
+       unsigned                clearing_tt:1;  /* Clear-TT-Buf in progress */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* description of one iso transaction (up to 3 KB data if highspeed) */
+struct fusbh200_iso_packet {
+       /* These will be copied to iTD when scheduling */
+       u64                     bufp;           /* itd->hw_bufp{,_hi}[pg] |= */
+       __hc32                  transaction;    /* itd->hw_transaction[i] |= */
+       u8                      cross;          /* buf crosses pages */
+       /* for full speed OUT splits */
+       u32                     buf1;
+};
+
+/* temporary schedule data for packets from iso urbs (both speeds)
+ * each packet is one logical usb transaction to the device (not TT),
+ * beginning at stream->next_uframe
+ */
+struct fusbh200_iso_sched {
+       struct list_head        td_list;
+       unsigned                span;
+       struct fusbh200_iso_packet      packet [0];
+};
+
+/*
+ * fusbh200_iso_stream - groups all (s)itds for this endpoint.
+ * acts like a qh would, if EHCI had them for ISO.
+ */
+struct fusbh200_iso_stream {
+       /* first field matches fusbh200_qh, but is NULL */
+       struct fusbh200_qh_hw   *hw;
+
+       u8                      bEndpointAddress;
+       u8                      highspeed;
+       struct list_head        td_list;        /* queued itds */
+       struct list_head        free_list;      /* list of unused itds */
+       struct usb_device       *udev;
+       struct usb_host_endpoint *ep;
+
+       /* output of (re)scheduling */
+       int                     next_uframe;
+       __hc32                  splits;
+
+       /* the rest is derived from the endpoint descriptor,
+        * trusting urb->interval == f(epdesc->bInterval) and
+        * including the extra info for hw_bufp[0..2]
+        */
+       u8                      usecs, c_usecs;
+       u16                     interval;
+       u16                     tt_usecs;
+       u16                     maxp;
+       u16                     raw_mask;
+       unsigned                bandwidth;
+
+       /* This is used to initialize iTD's hw_bufp fields */
+       __hc32                  buf0;
+       __hc32                  buf1;
+       __hc32                  buf2;
+
+       /* this is used to initialize sITD's tt info */
+       __hc32                  address;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.3
+ * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
+ *
+ * Schedule records for high speed iso xfers
+ */
+struct fusbh200_itd {
+       /* first part defined by EHCI spec */
+       __hc32                  hw_next;           /* see EHCI 3.3.1 */
+       __hc32                  hw_transaction [8]; /* see EHCI 3.3.2 */
+#define FUSBH200_ISOC_ACTIVE        (1<<31)        /* activate transfer this slot */
+#define FUSBH200_ISOC_BUF_ERR       (1<<30)        /* Data buffer error */
+#define FUSBH200_ISOC_BABBLE        (1<<29)        /* babble detected */
+#define FUSBH200_ISOC_XACTERR       (1<<28)        /* XactErr - transaction error */
+#define        FUSBH200_ITD_LENGTH(tok)        (((tok)>>16) & 0x0fff)
+#define        FUSBH200_ITD_IOC                (1 << 15)       /* interrupt on complete */
+
+#define ITD_ACTIVE(fusbh200)   cpu_to_hc32(fusbh200, FUSBH200_ISOC_ACTIVE)
+
+       __hc32                  hw_bufp [7];    /* see EHCI 3.3.3 */
+       __hc32                  hw_bufp_hi [7]; /* Appendix B */
+
+       /* the rest is HCD-private */
+       dma_addr_t              itd_dma;        /* for this itd */
+       union fusbh200_shadow   itd_next;       /* ptr to periodic q entry */
+
+       struct urb              *urb;
+       struct fusbh200_iso_stream      *stream;        /* endpoint's queue */
+       struct list_head        itd_list;       /* list of stream's itds */
+
+       /* any/all hw_transactions here may be used by that urb */
+       unsigned                frame;          /* where scheduled */
+       unsigned                pg;
+       unsigned                index[8];       /* in urb->iso_frame_desc */
+} __attribute__ ((aligned (32)));
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.96 Section 3.7
+ * Periodic Frame Span Traversal Node (FSTN)
+ *
+ * Manages split interrupt transactions (using TT) that span frame boundaries
+ * into uframes 0/1; see 4.12.2.2.  In those uframes, a "save place" FSTN
+ * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
+ * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
+ */
+struct fusbh200_fstn {
+       __hc32                  hw_next;        /* any periodic q entry */
+       __hc32                  hw_prev;        /* qh or FUSBH200_LIST_END */
+
+       /* the rest is HCD-private */
+       dma_addr_t              fstn_dma;
+       union fusbh200_shadow   fstn_next;      /* ptr to periodic q entry */
+} __attribute__ ((aligned (32)));
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define fusbh200_prepare_ports_for_controller_suspend(fusbh200, do_wakeup)     \
+               fusbh200_adjust_port_wakeup_flags(fusbh200, true, do_wakeup);
+
+#define fusbh200_prepare_ports_for_controller_resume(fusbh200)                 \
+               fusbh200_adjust_port_wakeup_flags(fusbh200, false, false);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Some EHCI controllers have a Transaction Translator built into the
+ * root hub. This is a non-standard feature.  Each controller will need
+ * to add code to the following inline functions, and call them as
+ * needed (mostly in root hub code).
+ */
+
+static inline unsigned int
+fusbh200_get_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
+{
+       return (readl(&fusbh200->regs->bmcsr)
+               & BMCSR_HOST_SPD_TYP) >> 9;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int
+fusbh200_port_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
+{
+       switch (fusbh200_get_speed(fusbh200, portsc)) {
+       case 0:
+               return 0;
+       case 1:
+               return USB_PORT_STAT_LOW_SPEED;
+       case 2:
+       default:
+               return USB_PORT_STAT_HIGH_SPEED;
+       }
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define        fusbh200_has_fsl_portno_bug(e)          (0)
+
+/*
+ * While most USB host controllers implement their registers in
+ * little-endian format, a minority (celleb companion chip) implement
+ * them in big endian format.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ *
+ */
+
+#define fusbh200_big_endian_mmio(e)    0
+#define fusbh200_big_endian_capbase(e) 0
+
+static inline unsigned int fusbh200_readl(const struct fusbh200_hcd *fusbh200,
+               __u32 __iomem * regs)
+{
+       return readl(regs);
+}
+
+static inline void fusbh200_writel(const struct fusbh200_hcd *fusbh200,
+               const unsigned int val, __u32 __iomem *regs)
+{
+       writel(val, regs);
+}
+
+/* cpu to fusbh200 */
+static inline __hc32 cpu_to_hc32 (const struct fusbh200_hcd *fusbh200, const u32 x)
+{
+       return cpu_to_le32(x);
+}
+
+/* fusbh200 to cpu */
+static inline u32 hc32_to_cpu (const struct fusbh200_hcd *fusbh200, const __hc32 x)
+{
+       return le32_to_cpu(x);
+}
+
+static inline u32 hc32_to_cpup (const struct fusbh200_hcd *fusbh200, const __hc32 *x)
+{
+       return le32_to_cpup(x);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline unsigned fusbh200_read_frame_index(struct fusbh200_hcd *fusbh200)
+{
+       return fusbh200_readl(fusbh200, &fusbh200->regs->frame_index);
+}
+
+#define fusbh200_itdlen(urb, desc, t) ({                       \
+       usb_pipein((urb)->pipe) ?                               \
+       (desc)->length - FUSBH200_ITD_LENGTH(t) :                       \
+       FUSBH200_ITD_LENGTH(t);                                 \
+})
+/*-------------------------------------------------------------------------*/
+
+#ifndef DEBUG
+#define STUB_DEBUG_FILES
+#endif /* DEBUG */
+
+/*-------------------------------------------------------------------------*/
+
+#endif /* __LINUX_FUSBH200_H */
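
For readers skimming the new fusbh200 header, the endianness helpers and the qTD token macros above are easiest to follow together. The helper below is an illustrative sketch only (its name is invented and it is not part of the merged driver); it uses nothing but fields and macros defined in this header.

static inline int fusbh200_qtd_short_read(struct fusbh200_hcd *fusbh200,
		struct fusbh200_qtd *qtd)
{
	/* hw_token is kept in "hc" byte order; convert before testing bits */
	u32 token = hc32_to_cpu(fusbh200, qtd->hw_token);

	/* still owned by the controller, or halted on an error */
	if (token & (QTD_STS_ACTIVE | QTD_STS_HALT))
		return 0;

	/* QTD_LENGTH() is the residue; a nonzero residue on an IN PID means
	 * the device sent less than was asked for (a short read) */
	return IS_SHORT_READ(token);
}
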
index 104730dabd2d59f44ab8c0f4a8da7e538d111792..483990c716aa5938ce58e8b7b227e5ab9dec421c 100644 (file)
@@ -577,7 +577,7 @@ static struct hc_driver hwahc_hc_driver = {
        .product_desc = "Wireless USB HWA host controller",
        .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
        .irq = NULL,                    /* FIXME */
-       .flags = HCD_USB2,              /* FIXME */
+       .flags = HCD_USB25,
        .reset = hwahc_op_reset,
        .start = hwahc_op_start,
        .stop = hwahc_op_stop,
@@ -588,8 +588,6 @@ static struct hc_driver hwahc_hc_driver = {
 
        .hub_status_data = wusbhc_rh_status_data,
        .hub_control = wusbhc_rh_control,
-       .bus_suspend = wusbhc_rh_suspend,
-       .bus_resume = wusbhc_rh_resume,
        .start_port_reset = wusbhc_rh_start_port_reset,
 };
 
@@ -685,12 +683,9 @@ static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
        wa->usb_dev = usb_get_dev(usb_dev);     /* bind the USB device */
        wa->usb_iface = usb_get_intf(iface);
        wusbhc->dev = dev;
-       wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent);
-       if (wusbhc->uwb_rc == NULL) {
-               result = -ENODEV;
-               dev_err(dev, "Cannot get associated UWB Host Controller\n");
-               goto error_rc_get;
-       }
+       /* defer getting the uwb_rc handle until it is needed since it
+        * may not have been registered by the hwa_rc driver yet. */
+       wusbhc->uwb_rc = NULL;
        result = wa_fill_descr(wa);     /* Get the device descriptor */
        if (result < 0)
                goto error_fill_descriptor;
@@ -733,8 +728,6 @@ error_wusbhc_create:
        /* WA Descr fill allocs no resources */
 error_security_create:
 error_fill_descriptor:
-       uwb_rc_put(wusbhc->uwb_rc);
-error_rc_get:
        usb_put_intf(iface);
        usb_put_dev(usb_dev);
        return result;
@@ -776,6 +769,7 @@ static int hwahc_probe(struct usb_interface *usb_iface,
                goto error_alloc;
        }
        usb_hcd->wireless = 1;
+       usb_hcd->self.sg_tablesize = ~0;
        wusbhc = usb_hcd_to_wusbhc(usb_hcd);
        hwahc = container_of(wusbhc, struct hwahc, wusbhc);
        hwahc_init(hwahc);
index f0ebe8e7c58b4dced4d50617b7a6235d90143cac..03dc4d9cbeca811e156d8e5ed32c235a20129177 100644 (file)
@@ -809,26 +809,36 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
 
        /* calculate frame */
        cur_frame = imx21_hc_get_frame(hcd);
-       if (urb->transfer_flags & URB_ISO_ASAP) {
-               if (list_empty(&ep_priv->td_list))
-                       urb->start_frame = cur_frame + 5;
-               else
-                       urb->start_frame = list_entry(
-                               ep_priv->td_list.prev,
-                               struct td, list)->frame + urb->interval;
-       }
-       urb->start_frame = wrap_frame(urb->start_frame);
-       if (frame_after(cur_frame, urb->start_frame)) {
-               dev_dbg(imx21->dev,
-                       "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
-                       urb->start_frame, cur_frame,
-                       (urb->transfer_flags & URB_ISO_ASAP) != 0);
-               urb->start_frame = wrap_frame(cur_frame + 1);
+       i = 0;
+       if (list_empty(&ep_priv->td_list)) {
+               urb->start_frame = wrap_frame(cur_frame + 5);
+       } else {
+               urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
+                               struct td, list)->frame + urb->interval);
+
+               if (frame_after(cur_frame, urb->start_frame)) {
+                       dev_dbg(imx21->dev,
+                               "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
+                               urb->start_frame, cur_frame,
+                               (urb->transfer_flags & URB_ISO_ASAP) != 0);
+                       i = DIV_ROUND_UP(wrap_frame(
+                                       cur_frame - urb->start_frame),
+                                       urb->interval);
+                       if (urb->transfer_flags & URB_ISO_ASAP) {
+                               urb->start_frame = wrap_frame(urb->start_frame
+                                               + i * urb->interval);
+                               i = 0;
+                       } else if (i >= urb->number_of_packets) {
+                               ret = -EXDEV;
+                               goto alloc_dmem_failed;
+                       }
+               }
        }
 
        /* set up transfers */
+       urb_priv->isoc_remaining = urb->number_of_packets - i;
        td = urb_priv->isoc_td;
-       for (i = 0; i < urb->number_of_packets; i++, td++) {
+       for (; i < urb->number_of_packets; i++, td++) {
                unsigned int offset = urb->iso_frame_desc[i].offset;
                td->ep = ep;
                td->urb = urb;
@@ -840,7 +850,6 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
                list_add_tail(&td->list, &ep_priv->td_list);
        }
 
-       urb_priv->isoc_remaining = urb->number_of_packets;
        dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
                urb->number_of_packets, urb->start_frame, td->frame);
 
index a13709ee4e5d933bf4514df16296d66c7700ef17..3df49b169b531f730cfc4c0cf33c8d7dd27a42d6 100644 (file)
@@ -118,7 +118,7 @@ static int of_isp1760_probe(struct platform_device *dev)
                goto free_gpio;
        }
 
-       dev_set_drvdata(&dev->dev, drvdata);
+       platform_set_drvdata(dev, drvdata);
        return ret;
 
 free_gpio:
@@ -133,9 +133,7 @@ free_data:
 
 static int of_isp1760_remove(struct platform_device *dev)
 {
-       struct isp1760 *drvdata = dev_get_drvdata(&dev->dev);
-
-       dev_set_drvdata(&dev->dev, NULL);
+       struct isp1760 *drvdata = platform_get_drvdata(dev);
 
        usb_remove_hcd(drvdata->hcd);
        iounmap(drvdata->hcd->regs);
@@ -398,7 +396,7 @@ static int isp1760_plat_probe(struct platform_device *pdev)
                               irqflags, -ENOENT,
                               &pdev->dev, dev_name(&pdev->dev), devflags);
 
-       dev_set_drvdata(&pdev->dev, hcd);
+       platform_set_drvdata(pdev, hcd);
 
        if (IS_ERR(hcd)) {
                pr_warning("isp1760: Failed to register the HCD device\n");
@@ -419,7 +417,7 @@ static int isp1760_plat_remove(struct platform_device *pdev)
 {
        struct resource *mem_res;
        resource_size_t mem_size;
-       struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
+       struct usb_hcd *hcd = platform_get_drvdata(pdev);
 
        usb_remove_hcd(hcd);
 
index 2ee1496dbc1d37c1f778d847712f78437abe21a4..9677f6831209709d8ea7c42b97c91af3959c000d 100644 (file)
@@ -41,17 +41,17 @@ extern int usb_disabled(void);
 
 static void at91_start_clock(void)
 {
-       clk_enable(hclk);
-       clk_enable(iclk);
-       clk_enable(fclk);
+       clk_prepare_enable(hclk);
+       clk_prepare_enable(iclk);
+       clk_prepare_enable(fclk);
        clocked = 1;
 }
 
 static void at91_stop_clock(void)
 {
-       clk_disable(fclk);
-       clk_disable(iclk);
-       clk_disable(hclk);
+       clk_disable_unprepare(fclk);
+       clk_disable_unprepare(iclk);
+       clk_disable_unprepare(hclk);
        clocked = 0;
 }
 
index 0b815a856811ed74d4bae53343a910c2d1d0ac05..6aaa9c9c8eb00f90d3f99711196659991aca2f43 100644 (file)
@@ -401,7 +401,6 @@ static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
        struct usb_hcd  *hcd = platform_get_drvdata(dev);
 
        usb_hcd_da8xx_remove(hcd, dev);
-       platform_set_drvdata(dev, NULL);
 
        return 0;
 }
index fc627fd5411670369d89ba4aec7428b55e2fdef0..a9d3437da220393e9a9454f8a94914692a4d0354 100644 (file)
@@ -79,23 +79,8 @@ static const char    hcd_name [] = "ohci_hcd";
 #include "pci-quirks.h"
 
 static void ohci_dump (struct ohci_hcd *ohci, int verbose);
-static int ohci_init (struct ohci_hcd *ohci);
 static void ohci_stop (struct usb_hcd *hcd);
 
-#if defined(CONFIG_PM) || defined(CONFIG_PCI)
-static int ohci_restart (struct ohci_hcd *ohci);
-#endif
-
-#ifdef CONFIG_PCI
-static void sb800_prefetch(struct ohci_hcd *ohci, int on);
-#else
-static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
-{
-       return;
-}
-#endif
-
-
 #include "ohci-hub.c"
 #include "ohci-dbg.c"
 #include "ohci-mem.c"
@@ -772,6 +757,32 @@ retry:
        return 0;
 }
 
+/* ohci_setup routine for generic controller initialization */
+
+int ohci_setup(struct usb_hcd *hcd)
+{
+       struct ohci_hcd         *ohci = hcd_to_ohci(hcd);
+
+       ohci_hcd_init(ohci);
+
+       return ohci_init(ohci);
+}
+EXPORT_SYMBOL_GPL(ohci_setup);
+
+/* ohci_start routine for generic controller start of all OHCI bus glue */
+static int ohci_start(struct usb_hcd *hcd)
+{
+       struct ohci_hcd         *ohci = hcd_to_ohci(hcd);
+       int     ret;
+
+       ret = ohci_run(ohci);
+       if (ret < 0) {
+               ohci_err(ohci, "can't start\n");
+               ohci_stop(hcd);
+       }
+       return ret;
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* an interrupt happens */
@@ -953,12 +964,13 @@ static void ohci_stop (struct usb_hcd *hcd)
 #if defined(CONFIG_PM) || defined(CONFIG_PCI)
 
 /* must not be called from interrupt context */
-static int ohci_restart (struct ohci_hcd *ohci)
+int ohci_restart(struct ohci_hcd *ohci)
 {
        int temp;
        int i;
        struct urb_priv *priv;
 
+       ohci_init(ohci);
        spin_lock_irq(&ohci->lock);
        ohci->rh_state = OHCI_RH_HALTED;
 
@@ -1012,12 +1024,13 @@ static int ohci_restart (struct ohci_hcd *ohci)
        ohci_dbg(ohci, "restart complete\n");
        return 0;
 }
+EXPORT_SYMBOL_GPL(ohci_restart);
 
 #endif
 
 #ifdef CONFIG_PM
 
-static int __maybe_unused ohci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 {
        struct ohci_hcd *ohci = hcd_to_ohci (hcd);
        unsigned long   flags;
@@ -1035,9 +1048,10 @@ static int __maybe_unused ohci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ohci_suspend);
 
 
-static int __maybe_unused ohci_resume(struct usb_hcd *hcd, bool hibernated)
+int ohci_resume(struct usb_hcd *hcd, bool hibernated)
 {
        struct ohci_hcd         *ohci = hcd_to_ohci(hcd);
        int                     port;
@@ -1085,20 +1099,79 @@ static int __maybe_unused ohci_resume(struct usb_hcd *hcd, bool hibernated)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ohci_resume);
 
 #endif
 
 /*-------------------------------------------------------------------------*/
 
+/*
+ * Generic structure: This gets copied for platform drivers so that
+ * individual entries can be overridden as needed.
+ */
+
+static const struct hc_driver ohci_hc_driver = {
+       .description =          hcd_name,
+       .product_desc =         "OHCI Host Controller",
+       .hcd_priv_size =        sizeof(struct ohci_hcd),
+
+       /*
+        * generic hardware linkage
+       */
+       .irq =                  ohci_irq,
+       .flags =                HCD_MEMORY | HCD_USB11,
+
+       /*
+       * basic lifecycle operations
+       */
+       .reset =                ohci_setup,
+       .start =                ohci_start,
+       .stop =                 ohci_stop,
+       .shutdown =             ohci_shutdown,
+
+       /*
+        * managing i/o requests and associated device resources
+       */
+       .urb_enqueue =          ohci_urb_enqueue,
+       .urb_dequeue =          ohci_urb_dequeue,
+       .endpoint_disable =     ohci_endpoint_disable,
+
+       /*
+       * scheduling support
+       */
+       .get_frame_number =     ohci_get_frame,
+
+       /*
+       * root hub support
+       */
+       .hub_status_data =      ohci_hub_status_data,
+       .hub_control =          ohci_hub_control,
+#ifdef CONFIG_PM
+       .bus_suspend =          ohci_bus_suspend,
+       .bus_resume =           ohci_bus_resume,
+#endif
+       .start_port_reset =     ohci_start_port_reset,
+};
+
+void ohci_init_driver(struct hc_driver *drv,
+               const struct ohci_driver_overrides *over)
+{
+       /* Copy the generic table to drv and then apply the overrides */
+       *drv = ohci_hc_driver;
+
+       drv->product_desc = over->product_desc;
+       drv->hcd_priv_size += over->extra_priv_size;
+       if (over->reset)
+               drv->reset = over->reset;
+}
+EXPORT_SYMBOL_GPL(ohci_init_driver);
+
+/*-------------------------------------------------------------------------*/
+
 MODULE_AUTHOR (DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE ("GPL");
 
-#ifdef CONFIG_PCI
-#include "ohci-pci.c"
-#define PCI_DRIVER             ohci_pci_driver
-#endif
-
 #if defined(CONFIG_ARCH_SA1100) && defined(CONFIG_SA1111)
 #include "ohci-sa1111.c"
 #define SA1111_DRIVER          ohci_hcd_sa1111_driver
@@ -1189,30 +1262,6 @@ MODULE_LICENSE ("GPL");
 #define PLATFORM_DRIVER                ohci_hcd_tilegx_driver
 #endif
 
-#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
-#include "ohci-platform.c"
-#define PLATFORM_DRIVER                ohci_platform_driver
-#endif
-
-#if    !defined(PCI_DRIVER) &&         \
-       !defined(PLATFORM_DRIVER) &&    \
-       !defined(OMAP1_PLATFORM_DRIVER) &&      \
-       !defined(OMAP3_PLATFORM_DRIVER) &&      \
-       !defined(OF_PLATFORM_DRIVER) && \
-       !defined(SA1111_DRIVER) &&      \
-       !defined(PS3_SYSTEM_BUS_DRIVER) && \
-       !defined(SM501_OHCI_DRIVER) && \
-       !defined(TMIO_OHCI_DRIVER) && \
-       !defined(S3C2410_PLATFORM_DRIVER) && \
-       !defined(EXYNOS_PLATFORM_DRIVER) && \
-       !defined(EP93XX_PLATFORM_DRIVER) && \
-       !defined(AT91_PLATFORM_DRIVER) && \
-       !defined(NXP_PLATFORM_DRIVER) && \
-       !defined(DAVINCI_PLATFORM_DRIVER) && \
-       !defined(SPEAR_PLATFORM_DRIVER)
-#error "missing bus glue for ohci-hcd"
-#endif
-
 static int __init ohci_hcd_mod_init(void)
 {
        int retval = 0;
@@ -1269,12 +1318,6 @@ static int __init ohci_hcd_mod_init(void)
                goto error_sa1111;
 #endif
 
-#ifdef PCI_DRIVER
-       retval = pci_register_driver(&PCI_DRIVER);
-       if (retval < 0)
-               goto error_pci;
-#endif
-
 #ifdef SM501_OHCI_DRIVER
        retval = platform_driver_register(&SM501_OHCI_DRIVER);
        if (retval < 0)
@@ -1368,10 +1411,6 @@ static int __init ohci_hcd_mod_init(void)
        platform_driver_unregister(&SM501_OHCI_DRIVER);
  error_sm501:
 #endif
-#ifdef PCI_DRIVER
-       pci_unregister_driver(&PCI_DRIVER);
- error_pci:
-#endif
 #ifdef SA1111_DRIVER
        sa1111_driver_unregister(&SA1111_DRIVER);
  error_sa1111:
@@ -1436,9 +1475,6 @@ static void __exit ohci_hcd_mod_exit(void)
 #ifdef SM501_OHCI_DRIVER
        platform_driver_unregister(&SM501_OHCI_DRIVER);
 #endif
-#ifdef PCI_DRIVER
-       pci_unregister_driver(&PCI_DRIVER);
-#endif
 #ifdef SA1111_DRIVER
        sa1111_driver_unregister(&SA1111_DRIVER);
 #endif
index 60ff4220e8b4fbe771857f5b1316e02663eccf68..2347ab83f046f7c7ce561aaec5562f47399efd59 100644 (file)
@@ -176,7 +176,6 @@ __acquires(ohci->lock)
        if (status == -EBUSY) {
                if (!autostopped) {
                        spin_unlock_irq (&ohci->lock);
-                       (void) ohci_init (ohci);
                        status = ohci_restart (ohci);
 
                        usb_root_hub_lost_power(hcd->self.root_hub);
index 8062bb9dea16eaeeeeda7c274bf8abd95f593fd1..d4ef53990d7325c33746e7d57fd395c4ffc1c63b 100644 (file)
@@ -221,7 +221,6 @@ static int jz4740_ohci_probe(struct platform_device *pdev)
        return 0;
 
 err_disable:
-       platform_set_drvdata(pdev, NULL);
        if (jz4740_ohci->vbus) {
                regulator_disable(jz4740_ohci->vbus);
                regulator_put(jz4740_ohci->vbus);
@@ -246,8 +245,6 @@ static int jz4740_ohci_remove(struct platform_device *pdev)
 
        usb_remove_hcd(hcd);
 
-       platform_set_drvdata(pdev, NULL);
-
        if (jz4740_ohci->vbus) {
                regulator_disable(jz4740_ohci->vbus);
                regulator_put(jz4740_ohci->vbus);
index 5d7eb72c506403b6e3022f14b09aa47e2c8a97c1..7d7d507d54e83ef89cdd0695d694f7e3bb4a5e7d 100644 (file)
@@ -351,7 +351,6 @@ static int usb_hcd_nxp_remove(struct platform_device *pdev)
 
        usb_remove_hcd(hcd);
        nxp_stop_hc();
-       release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
        usb_put_hcd(hcd);
        clk_disable(usb_pll_clk);
        clk_put(usb_pll_clk);
@@ -360,8 +359,6 @@ static int usb_hcd_nxp_remove(struct platform_device *pdev)
        i2c_unregister_device(isp1301_i2c_client);
        isp1301_i2c_client = NULL;
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index d44430d009f89c48767efa4e422b1854c4d58673..342dc7e543b81afd5b17377ac351caf2405b7a02 100644 (file)
@@ -196,8 +196,6 @@ static int ohci_octeon_drv_remove(struct platform_device *pdev)
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
        usb_put_hcd(hcd);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index b1d32fb4a7ae60ead8bcc3db961ba0b0d9385027..8747fa6a51b7cf1c067bf7cf8264c442a0e330db 100644 (file)
@@ -498,7 +498,6 @@ static int ohci_hcd_omap_drv_remove(struct platform_device *dev)
        struct usb_hcd          *hcd = platform_get_drvdata(dev);
 
        usb_hcd_omap_remove(hcd, dev);
-       platform_set_drvdata(dev, NULL);
 
        return 0;
 }
index 8663851c8d8eac79b5d358d9e6f81f6116d5fc41..8f713571a0b75b9a24266bf281c934a2dcaf93cd 100644 (file)
@@ -252,7 +252,7 @@ static struct platform_driver ohci_hcd_omap3_driver = {
        .shutdown       = ohci_hcd_omap3_shutdown,
        .driver         = {
                .name   = "ohci-omap3",
-               .of_match_table = of_match_ptr(omap_ohci_dt_ids),
+               .of_match_table = omap_ohci_dt_ids,
        },
 };
 
index 951514ef446dfa5c5dbaff1fd299c5e4f121dd8c..08613e24189415641bc033d61af814e77b47143b 100644 (file)
  * This file is licenced under the GPL.
  */
 
-#ifndef CONFIG_PCI
-#error "This file is PCI bus glue.  CONFIG_PCI must be defined."
-#endif
-
-#include <linux/pci.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+#include "pci-quirks.h"
+
+#define DRIVER_DESC "OHCI PCI platform driver"
+
+static const char hcd_name[] = "ohci-pci";
 
 
 /*-------------------------------------------------------------------------*/
@@ -123,13 +130,6 @@ static void ohci_quirk_nec_worker(struct work_struct *work)
        struct ohci_hcd *ohci = container_of(work, struct ohci_hcd, nec_work);
        int status;
 
-       status = ohci_init(ohci);
-       if (status != 0) {
-               ohci_err(ohci, "Restarting NEC controller failed in %s, %d\n",
-                        "ohci_init", status);
-               return;
-       }
-
        status = ohci_restart(ohci);
        if (status != 0)
                ohci_err(ohci, "Restarting NEC controller failed in %s, %d\n",
@@ -175,19 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
        return 0;
 }
 
-static void sb800_prefetch(struct ohci_hcd *ohci, int on)
-{
-       struct pci_dev *pdev;
-       u16 misc;
-
-       pdev = to_pci_dev(ohci_to_hcd(ohci)->self.controller);
-       pci_read_config_word(pdev, 0x50, &misc);
-       if (on == 0)
-               pci_write_config_word(pdev, 0x50, misc & 0xfcff);
-       else
-               pci_write_config_word(pdev, 0x50, misc | 0x0300);
-}
-
 /* List of quirks for OHCI */
 static const struct pci_device_id ohci_pci_quirks[] = {
        {
@@ -249,10 +236,10 @@ static const struct pci_device_id ohci_pci_quirks[] = {
 static int ohci_pci_reset (struct usb_hcd *hcd)
 {
        struct ohci_hcd *ohci = hcd_to_ohci (hcd);
+       struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
        int ret = 0;
 
        if (hcd->self.controller) {
-               struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
                const struct pci_device_id *quirk_id;
 
                quirk_id = pci_match_id(ohci_pci_quirks, pdev);
@@ -262,94 +249,25 @@ static int ohci_pci_reset (struct usb_hcd *hcd)
                        ret = quirk(hcd);
                }
        }
-       if (ret == 0) {
-               ohci_hcd_init (ohci);
-               return ohci_init (ohci);
-       }
-       return ret;
-}
-
-
-static int ohci_pci_start (struct usb_hcd *hcd)
-{
-       struct ohci_hcd *ohci = hcd_to_ohci (hcd);
-       int             ret;
-
-#ifdef CONFIG_PM /* avoid warnings about unused pdev */
-       if (hcd->self.controller) {
-               struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-
-               /* RWC may not be set for add-in PCI cards, since boot
-                * firmware probably ignored them.  This transfers PCI
-                * PM wakeup capabilities.
-                */
-               if (device_can_wakeup(&pdev->dev))
-                       ohci->hc_control |= OHCI_CTRL_RWC;
-       }
-#endif /* CONFIG_PM */
 
-       ret = ohci_run (ohci);
-       if (ret < 0) {
-               ohci_err (ohci, "can't start\n");
-               ohci_stop (hcd);
-       }
+       if (ret == 0)
+               ret = ohci_setup(hcd);
+       /*
+       * After ohci setup RWC may not be set for add-in PCI cards.
+       * This transfers PCI PM wakeup capabilities.
+       */
+       if (device_can_wakeup(&pdev->dev))
+               ohci->hc_control |= OHCI_CTRL_RWC;
        return ret;
 }
 
+static struct hc_driver __read_mostly ohci_pci_hc_driver;
 
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_pci_hc_driver = {
-       .description =          hcd_name,
-       .product_desc =         "OHCI Host Controller",
-       .hcd_priv_size =        sizeof(struct ohci_hcd),
-
-       /*
-        * generic hardware linkage
-        */
-       .irq =                  ohci_irq,
-       .flags =                HCD_MEMORY | HCD_USB11,
-
-       /*
-        * basic lifecycle operations
-        */
+static const struct ohci_driver_overrides pci_overrides __initconst = {
+       .product_desc =         "OHCI PCI host controller",
        .reset =                ohci_pci_reset,
-       .start =                ohci_pci_start,
-       .stop =                 ohci_stop,
-       .shutdown =             ohci_shutdown,
-
-#ifdef CONFIG_PM
-       .pci_suspend =          ohci_suspend,
-       .pci_resume =           ohci_resume,
-#endif
-
-       /*
-        * managing i/o requests and associated device resources
-        */
-       .urb_enqueue =          ohci_urb_enqueue,
-       .urb_dequeue =          ohci_urb_dequeue,
-       .endpoint_disable =     ohci_endpoint_disable,
-
-       /*
-        * scheduling support
-        */
-       .get_frame_number =     ohci_get_frame,
-
-       /*
-        * root hub support
-        */
-       .hub_status_data =      ohci_hub_status_data,
-       .hub_control =          ohci_hub_control,
-#ifdef CONFIG_PM
-       .bus_suspend =          ohci_bus_suspend,
-       .bus_resume =           ohci_bus_resume,
-#endif
-       .start_port_reset =     ohci_start_port_reset,
 };
 
-/*-------------------------------------------------------------------------*/
-
-
 static const struct pci_device_id pci_ids [] = { {
        /* handle any USB OHCI controller */
        PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0),
@@ -377,3 +295,24 @@ static struct pci_driver ohci_pci_driver = {
        },
 #endif
 };
+
+static int __init ohci_pci_init(void)
+{
+       if (usb_disabled())
+               return -ENODEV;
+
+       pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+       ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
+       return pci_register_driver(&ohci_pci_driver);
+}
+module_init(ohci_pci_init);
+
+static void __exit ohci_pci_cleanup(void)
+{
+       pci_unregister_driver(&ohci_pci_driver);
+}
+module_exit(ohci_pci_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
index c3e7287f792170028f89cd65192fda0ac5a3d915..bc30475c3a236886e3784d47ccfdaa4fbd5b7fb3 100644 (file)
  *
  * Licensed under the GNU/GPL. See COPYING for details.
  */
+
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/usb/ohci_pdriver.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI generic platform driver"
+
+static const char hcd_name[] = "ohci-platform";
 
 static int ohci_platform_reset(struct usb_hcd *hcd)
 {
        struct platform_device *pdev = to_platform_device(hcd->self.controller);
        struct usb_ohci_pdata *pdata = pdev->dev.platform_data;
        struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-       int err;
 
        if (pdata->big_endian_desc)
                ohci->flags |= OHCI_QUIRK_BE_DESC;
@@ -30,58 +42,17 @@ static int ohci_platform_reset(struct usb_hcd *hcd)
                ohci->flags |= OHCI_QUIRK_BE_MMIO;
        if (pdata->no_big_frame_no)
                ohci->flags |= OHCI_QUIRK_FRAME_NO;
-
-       ohci_hcd_init(ohci);
-
        if (pdata->num_ports)
                ohci->num_ports = pdata->num_ports;
 
-       err = ohci_init(ohci);
-
-       return err;
-}
-
-static int ohci_platform_start(struct usb_hcd *hcd)
-{
-       struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-       int err;
-
-       err = ohci_run(ohci);
-       if (err < 0) {
-               ohci_err(ohci, "can't start\n");
-               ohci_stop(hcd);
-       }
-
-       return err;
+       return ohci_setup(hcd);
 }
 
-static const struct hc_driver ohci_platform_hc_driver = {
-       .description            = hcd_name,
-       .product_desc           = "Generic Platform OHCI Controller",
-       .hcd_priv_size          = sizeof(struct ohci_hcd),
+static struct hc_driver __read_mostly ohci_platform_hc_driver;
 
-       .irq                    = ohci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB11,
-
-       .reset                  = ohci_platform_reset,
-       .start                  = ohci_platform_start,
-       .stop                   = ohci_stop,
-       .shutdown               = ohci_shutdown,
-
-       .urb_enqueue            = ohci_urb_enqueue,
-       .urb_dequeue            = ohci_urb_dequeue,
-       .endpoint_disable       = ohci_endpoint_disable,
-
-       .get_frame_number       = ohci_get_frame,
-
-       .hub_status_data        = ohci_hub_status_data,
-       .hub_control            = ohci_hub_control,
-#ifdef CONFIG_PM
-       .bus_suspend            = ohci_bus_suspend,
-       .bus_resume             = ohci_bus_resume,
-#endif
-
-       .start_port_reset       = ohci_start_port_reset,
+static const struct ohci_driver_overrides platform_overrides __initconst = {
+       .product_desc = "Generic Platform OHCI controller",
+       .reset =        ohci_platform_reset,
 };
 
 static int ohci_platform_probe(struct platform_device *dev)
@@ -157,7 +128,6 @@ static int ohci_platform_remove(struct platform_device *dev)
 
        usb_remove_hcd(hcd);
        usb_put_hcd(hcd);
-       platform_set_drvdata(dev, NULL);
 
        if (pdata->power_off)
                pdata->power_off(dev);
@@ -223,3 +193,26 @@ static struct platform_driver ohci_platform_driver = {
                .pm     = &ohci_platform_pm_ops,
        }
 };
+
+static int __init ohci_platform_init(void)
+{
+       if (usb_disabled())
+               return -ENODEV;
+
+       pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+       ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
+       return platform_driver_register(&ohci_platform_driver);
+}
+module_init(ohci_platform_init);
+
+static void __exit ohci_platform_cleanup(void)
+{
+       platform_driver_unregister(&ohci_platform_driver);
+}
+module_exit(ohci_platform_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("GPL");
index 64c2ed9ff95e80f9d1a9679711ee99480d6210aa..8294e2fcc2f6288a35169181ee28bf499ff630fe 100644 (file)
@@ -185,8 +185,7 @@ err_rmr:
 
 static int ohci_hcd_ppc_of_remove(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
-       dev_set_drvdata(&op->dev, NULL);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
        dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
 
@@ -203,7 +202,7 @@ static int ohci_hcd_ppc_of_remove(struct platform_device *op)
 
 static void ohci_hcd_ppc_of_shutdown(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
         if (hcd->driver->shutdown)
                 hcd->driver->shutdown(hcd);
index 279b2ef1741149fe05deda764c09151763f56f1d..3a9c01d8b79c77b4fb607cbe003ab1b3ac6be6d5 100644 (file)
@@ -556,7 +556,6 @@ static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
 
        usb_hcd_pxa27x_remove(hcd, pdev);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 88731b7c5f4290332409e4fca11ded93051c639c..df4a6707322d322dc292e4d11bfdf926e34dd3ce 100644 (file)
@@ -41,6 +41,7 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
 __releases(ohci->lock)
 __acquires(ohci->lock)
 {
+        struct device *dev = ohci_to_hcd(ohci)->self.controller;
        // ASSERT (urb->hcpriv != 0);
 
        urb_free_priv (ohci, urb->hcpriv);
@@ -55,7 +56,7 @@ __acquires(ohci->lock)
                        if (quirk_amdiso(ohci))
                                usb_amd_quirk_pll_enable();
                        if (quirk_amdprefetch(ohci))
-                               sb800_prefetch(ohci, 0);
+                               sb800_prefetch(dev, 0);
                }
                break;
        case PIPE_INTERRUPT:
@@ -580,6 +581,7 @@ static void td_submit_urb (
        struct urb      *urb
 ) {
        struct urb_priv *urb_priv = urb->hcpriv;
+       struct device *dev = ohci_to_hcd(ohci)->self.controller;
        dma_addr_t      data;
        int             data_len = urb->transfer_buffer_length;
        int             cnt = 0;
@@ -689,7 +691,7 @@ static void td_submit_urb (
                        if (quirk_amdiso(ohci))
                                usb_amd_quirk_pll_disable();
                        if (quirk_amdprefetch(ohci))
-                               sb800_prefetch(ohci, 1);
+                               sb800_prefetch(dev, 1);
                }
                periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
                        && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
index 3b5b908fd47b29e25f0f90198c85607182152fef..d479d5ddab8853b9844f29ffa7e15b1ca0beaca7 100644 (file)
@@ -207,7 +207,6 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
        sm501_modify_reg(pdev->dev.parent, SM501_IRQ_MASK, 0, 1 << 6);
        sm501_unit_power(pdev->dev.parent, SM501_GATE_USB_HOST, 0);
 
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 3e19e0170d1195711b4f235fd4be043fb234d65e..cc9dd9e4f05e69469eeca87233dd0f29bac23992 100644 (file)
@@ -179,8 +179,6 @@ static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
                spear_stop_ohci(ohci_p);
 
        usb_put_hcd(hcd);
-
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
@@ -232,7 +230,7 @@ static struct platform_driver spear_ohci_hcd_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "spear-ohci",
-               .of_match_table = of_match_ptr(spear_ohci_id_table),
+               .of_match_table = spear_ohci_id_table,
        },
 };
 
index ea73009de6238e3304b230203e4f0f6701d3fcde..197d514fe0d163741d6b6ac3fbb8fa62404c06c5 100644 (file)
@@ -182,7 +182,6 @@ static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev)
        tilegx_stop_ohc();
        gxio_usb_host_destroy(&pdata->usb_ctx);
        destroy_irq(pdata->irq);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index 5e3a6deb62b1e7d37b30f0f8a4f534f9e740fda9..ecb09a5ada9ca4d33af566b037f9e5e07fa9bf80 100644 (file)
@@ -287,8 +287,6 @@ static int ohci_hcd_tmio_drv_remove(struct platform_device *dev)
        iounmap(tmio->ccr);
        usb_put_hcd(hcd);
 
-       platform_set_drvdata(dev, NULL);
-
        return 0;
 }
 
index d3299143d9e2cbc8c0f4a2f307da6318252010b8..e2e5faa5a40249666626869f9e0b2d87502e325f 100644 (file)
@@ -421,6 +421,9 @@ struct ohci_hcd {
        struct dentry           *debug_periodic;
        struct dentry           *debug_registers;
 #endif
+       /* platform-specific data -- must come last */
+       unsigned long           priv[0] __aligned(sizeof(s64));
+
 };
 
 #ifdef CONFIG_PCI
@@ -718,3 +721,20 @@ static inline u32 roothub_status (struct ohci_hcd *hc)
        { return ohci_readl (hc, &hc->regs->roothub.status); }
 static inline u32 roothub_portstatus (struct ohci_hcd *hc, int i)
        { return read_roothub (hc, portstatus [i], 0xffe0fce0); }
+
+/* Declarations of things exported for use by ohci platform drivers */
+
+struct ohci_driver_overrides {
+       const char      *product_desc;
+       size_t          extra_priv_size;
+       int             (*reset)(struct usb_hcd *hcd);
+};
+
+extern void    ohci_init_driver(struct hc_driver *drv,
+                               const struct ohci_driver_overrides *over);
+extern int     ohci_restart(struct ohci_hcd *ohci);
+extern int     ohci_setup(struct usb_hcd *hcd);
+#ifdef CONFIG_PM
+extern int     ohci_suspend(struct usb_hcd *hcd, bool do_wakeup);
+extern int     ohci_resume(struct usb_hcd *hcd, bool hibernated);
+#endif
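
Together with the ohci-hcd.c changes earlier in this series, these declarations are the whole interface a bus glue needs once it is built as its own module instead of being #included by ohci-hcd.c. A condensed, hypothetical example of the pattern (the foo_* names are invented; ohci-pci.c and ohci-platform.c above are the real conversions):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "ohci.h"

static struct hc_driver __read_mostly foo_ohci_hc_driver;

static int foo_ohci_reset(struct usb_hcd *hcd)
{
	/* board-specific quirks would go here, then the generic init */
	return ohci_setup(hcd);
}

static const struct ohci_driver_overrides foo_overrides __initconst = {
	.product_desc	= "Foo OHCI controller",
	.reset		= foo_ohci_reset,
};

static int __init foo_ohci_init(void)
{
	if (usb_disabled())
		return -ENODEV;

	/* copy the generic ohci_hc_driver, then apply the overrides */
	ohci_init_driver(&foo_ohci_hc_driver, &foo_overrides);

	/*
	 * ...then register a pci_driver or platform_driver whose probe
	 * calls usb_create_hcd(&foo_ohci_hc_driver, ...) + usb_add_hcd().
	 */
	return 0;
}
module_init(foo_ohci_init);

MODULE_LICENSE("GPL");
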
index 0f401dbfaf073bb59f7faad445a19663bded3a0e..4a6df2d8f902f04951cedab23de2ebf49301f786 100644 (file)
@@ -3874,7 +3874,6 @@ static int oxu_drv_probe(struct platform_device *pdev)
 
 error_init:
        kfree(info);
-       platform_set_drvdata(pdev, NULL);
 
 error_alloc:
        iounmap(base);
@@ -3907,7 +3906,6 @@ static int oxu_drv_remove(struct platform_device *pdev)
        release_mem_region(memstart, memlen);
 
        kfree(info);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index 4c338ec03a07d1bfa72ceea172f07e19f151f54b..b9848e4d3d44c8788bfbc43ce196c3b48beae7b4 100644 (file)
@@ -91,6 +91,19 @@ static struct amd_chipset_info {
 
 static DEFINE_SPINLOCK(amd_lock);
 
+void sb800_prefetch(struct device *dev, int on)
+{
+       u16 misc;
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       pci_read_config_word(pdev, 0x50, &misc);
+       if (on == 0)
+               pci_write_config_word(pdev, 0x50, misc & 0xfcff);
+       else
+               pci_write_config_word(pdev, 0x50, misc | 0x0300);
+}
+EXPORT_SYMBOL_GPL(sb800_prefetch);
+
 int usb_amd_find_chipset_info(void)
 {
        u8 rev = 0;
index 7f69a39163ce3b5560f9e0e24a0a9d86e7f77cef..4b8a2092432f8911b5d73053e34b135041fd0e4d 100644 (file)
@@ -11,11 +11,13 @@ void usb_amd_quirk_pll_enable(void);
 bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
 void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
+void sb800_prefetch(struct device *dev, int on);
 #else
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
 static inline void usb_amd_dev_put(void) {}
 static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
+static inline void sb800_prefetch(struct device *dev, int on) {}
 #endif  /* CONFIG_PCI */
 
 #endif  /*  __LINUX_USB_PCI_QUIRKS_H  */
index 511bfc46dd7829a3d393e429b11bfa51f19cd6ff..53c23ff7d68506d1febe9946d0cdbe45e68d2ae9 100644 (file)
@@ -157,9 +157,7 @@ err_rmr:
 
 static int uhci_hcd_grlib_remove(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
-
-       dev_set_drvdata(&op->dev, NULL);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
        dev_dbg(&op->dev, "stopping GRLIB GRUSBHC UHCI USB Controller\n");
 
@@ -183,7 +181,7 @@ static int uhci_hcd_grlib_remove(struct platform_device *op)
  */
 static void uhci_hcd_grlib_shutdown(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
        uhci_hc_died(hcd_to_uhci(hcd));
 }
index f1db61ada6a84c435aedc3967f04f8aab76c1e55..d033a0ec7f0d02bd3874ae5e7339d43e964d2611 100644 (file)
@@ -130,7 +130,6 @@ static int uhci_hcd_platform_remove(struct platform_device *pdev)
        iounmap(hcd->regs);
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
        usb_put_hcd(hcd);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
@@ -144,7 +143,7 @@ static int uhci_hcd_platform_remove(struct platform_device *pdev)
  */
 static void uhci_hcd_platform_shutdown(struct platform_device *op)
 {
-       struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+       struct usb_hcd *hcd = platform_get_drvdata(op);
 
        uhci_hc_died(hcd_to_uhci(hcd));
 }
@@ -161,6 +160,6 @@ static struct platform_driver uhci_platform_driver = {
        .driver = {
                .name = "platform-uhci",
                .owner = THIS_MODULE,
-               .of_match_table = of_match_ptr(platform_uhci_ids),
+               .of_match_table = platform_uhci_ids,
        },
 };
index c3a647816af0b7b690172cbf4e90799b47718dd3..ecc88db804e008ba4b7eacb48f68aa1c6a49ded4 100644 (file)
@@ -231,8 +231,6 @@ static struct hc_driver whc_hc_driver = {
 
        .hub_status_data = wusbhc_rh_status_data,
        .hub_control = wusbhc_rh_control,
-       .bus_suspend = wusbhc_rh_suspend,
-       .bus_resume = wusbhc_rh_resume,
        .start_port_reset = wusbhc_rh_start_port_reset,
 };
 
index 5f3a7c74aa8d39404212d342f6236fe1ea48d514..5d5e58fdeccc07604179a419c97414a71def5150 100644 (file)
@@ -503,11 +503,14 @@ static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
        if (last_ep < 31)
                last_ep_ctx = last_ep + 1;
        for (i = 0; i < last_ep_ctx; ++i) {
+               unsigned int epaddr = xhci_get_endpoint_address(i);
                struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
                dma_addr_t dma = ctx->dma +
                        ((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
 
-               xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
+               xhci_dbg(xhci, "%s Endpoint %02d Context (ep_index %02d):\n",
+                               usb_endpoint_out(epaddr) ? "OUT" : "IN",
+                               epaddr & USB_ENDPOINT_NUMBER_MASK, i);
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
                                &ep_ctx->ep_info,
                                (unsigned long long)dma, ep_ctx->ep_info);
@@ -550,6 +553,11 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
        if (ctx->type == XHCI_CTX_TYPE_INPUT) {
                struct xhci_input_control_ctx *ctrl_ctx =
                        xhci_get_input_control_ctx(xhci, ctx);
+               if (!ctrl_ctx) {
+                       xhci_warn(xhci, "Could not get input context, bad type.\n");
+                       return;
+               }
+
                xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
                         &ctrl_ctx->drop_flags, (unsigned long long)dma,
                         ctrl_ctx->drop_flags);
index 377f4242dabb8cac56456258506b0c3982495b2a..8d7a1324e2f3e7e3859c7574d23cadd3d873c92d 100644 (file)
@@ -71,6 +71,7 @@
 
 /* USB 2.0 xHCI 1.0 hardware LPM capability - section 7.2.2.1.3.2 */
 #define XHCI_HLC               (1 << 19)
+#define XHCI_BLC               (1 << 20)
 
 /* command register values to disable interrupts and halt the HC */
 /* start/stop HC execution - do not write unless HC is halted*/
index 187a3ec1069ace87d28b888823e4c4e38053f80b..1d3545943c50a792de25a35a4cfc150b4c5ac347 100644 (file)
@@ -867,18 +867,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                case USB_PORT_FEAT_U1_TIMEOUT:
                        if (hcd->speed != HCD_USB3)
                                goto error;
-                       temp = xhci_readl(xhci, port_array[wIndex] + 1);
+                       temp = xhci_readl(xhci, port_array[wIndex] + PORTPMSC);
                        temp &= ~PORT_U1_TIMEOUT_MASK;
                        temp |= PORT_U1_TIMEOUT(timeout);
-                       xhci_writel(xhci, temp, port_array[wIndex] + 1);
+                       xhci_writel(xhci, temp, port_array[wIndex] + PORTPMSC);
                        break;
                case USB_PORT_FEAT_U2_TIMEOUT:
                        if (hcd->speed != HCD_USB3)
                                goto error;
-                       temp = xhci_readl(xhci, port_array[wIndex] + 1);
+                       temp = xhci_readl(xhci, port_array[wIndex] + PORTPMSC);
                        temp &= ~PORT_U2_TIMEOUT_MASK;
                        temp |= PORT_U2_TIMEOUT(timeout);
-                       xhci_writel(xhci, temp, port_array[wIndex] + 1);
+                       xhci_writel(xhci, temp, port_array[wIndex] + PORTPMSC);
                        break;
                default:
                        goto error;
@@ -1098,10 +1098,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                        __le32 __iomem *addr;
                        u32 tmp;
 
-                       /* Add one to the port status register address to get
-                        * the port power control register address.
-                        */
-                       addr = port_array[port_index] + 1;
+                       /* Get the port power control register address. */
+                       addr = port_array[port_index] + PORTPMSC;
                        tmp = xhci_readl(xhci, addr);
                        tmp |= PORT_RWE;
                        xhci_writel(xhci, tmp, addr);
@@ -1193,7 +1191,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
                        /* Add one to the port status register address to get
                         * the port power control register address.
                         */
-                       addr = port_array[port_index] + 1;
+                       addr = port_array[port_index] + PORTPMSC;
                        tmp = xhci_readl(xhci, addr);
                        tmp &= ~PORT_RWE;
                        xhci_writel(xhci, tmp, addr);
index fbf75e57628b72e0b7a74237d8a9610826b2ab93..df6978abd7e6f756c6d81ebeef324473e7f36a87 100644 (file)
@@ -358,17 +358,25 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
                                                    int type, gfp_t flags)
 {
-       struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
+       struct xhci_container_ctx *ctx;
+
+       if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
+               return NULL;
+
+       ctx = kzalloc(sizeof(*ctx), flags);
        if (!ctx)
                return NULL;
 
-       BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
        ctx->type = type;
        ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
        if (type == XHCI_CTX_TYPE_INPUT)
                ctx->size += CTX_SIZE(xhci->hcc_params);
 
        ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+       if (!ctx->bytes) {
+               kfree(ctx);
+               return NULL;
+       }
        memset(ctx->bytes, 0, ctx->size);
        return ctx;
 }
@@ -385,7 +393,9 @@ static void xhci_free_container_ctx(struct xhci_hcd *xhci,
 struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
                                              struct xhci_container_ctx *ctx)
 {
-       BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
+       if (ctx->type != XHCI_CTX_TYPE_INPUT)
+               return NULL;
+
        return (struct xhci_input_control_ctx *)ctx->bytes;
 }
 
@@ -1049,6 +1059,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        struct xhci_ep_ctx      *ep0_ctx;
        struct xhci_slot_ctx    *slot_ctx;
        u32                     port_num;
+       u32                     max_packets;
        struct usb_device *top_dev;
 
        dev = xhci->devs[udev->slot_id];
@@ -1066,15 +1077,20 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        switch (udev->speed) {
        case USB_SPEED_SUPER:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+               max_packets = MAX_PACKET(512);
                break;
        case USB_SPEED_HIGH:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
+               max_packets = MAX_PACKET(64);
                break;
+       /* USB core guesses at a 64-byte max packet first for FS devices */
        case USB_SPEED_FULL:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
+               max_packets = MAX_PACKET(64);
                break;
        case USB_SPEED_LOW:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
+               max_packets = MAX_PACKET(8);
                break;
        case USB_SPEED_WIRELESS:
                xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -1082,7 +1098,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
                break;
        default:
                /* Speed was set earlier, this shouldn't happen. */
-               BUG();
+               return -EINVAL;
        }
        /* Find the root hub port this device is under */
        port_num = xhci_find_real_port_number(xhci, udev);
@@ -1141,31 +1157,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        /* Step 4 - ring already allocated */
        /* Step 5 */
        ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
-       /*
-        * XXX: Not sure about wireless USB devices.
-        */
-       switch (udev->speed) {
-       case USB_SPEED_SUPER:
-               ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
-               break;
-       case USB_SPEED_HIGH:
-       /* USB core guesses at a 64-byte max packet first for FS devices */
-       case USB_SPEED_FULL:
-               ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
-               break;
-       case USB_SPEED_LOW:
-               ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
-               break;
-       case USB_SPEED_WIRELESS:
-               xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
-               return -EINVAL;
-               break;
-       default:
-               /* New speed? */
-               BUG();
-       }
+
        /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
-       ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
+       ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
+                                        max_packets);
 
        ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
                                   dev->eps[0].ring->cycle_state);
@@ -1338,7 +1333,7 @@ static u32 xhci_get_endpoint_type(struct usb_device *udev,
                else
                        type = EP_TYPE(INT_OUT_EP);
        } else {
-               BUG();
+               type = 0;
        }
        return type;
 }
@@ -1384,10 +1379,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        unsigned int max_burst;
        enum xhci_ring_type type;
        u32 max_esit_payload;
+       u32 endpoint_type;
 
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 
+       endpoint_type = xhci_get_endpoint_type(udev, ep);
+       if (!endpoint_type)
+               return -EINVAL;
+       ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);
+
        type = usb_endpoint_type(&ep->desc);
        /* Set up the endpoint ring */
        virt_dev->eps[ep_index].new_ring =
@@ -1416,11 +1417,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
         * CErr shall be set to 0 for Isoch endpoints.
         */
        if (!usb_endpoint_xfer_isoc(&ep->desc))
-               ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
+               ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
        else
-               ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));
-
-       ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
+               ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));
 
        /* Set the max packet size and max burst */
        max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
@@ -1856,6 +1855,7 @@ no_bw:
        kfree(xhci->usb3_ports);
        kfree(xhci->port_array);
        kfree(xhci->rh_bw);
+       kfree(xhci->ext_caps);
 
        xhci->page_size = 0;
        xhci->page_shift = 0;
@@ -2043,7 +2043,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 }
 
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
-               __le32 __iomem *addr, u8 major_revision)
+               __le32 __iomem *addr, u8 major_revision, int max_caps)
 {
        u32 temp, port_offset, port_count;
        int i;
@@ -2068,6 +2068,10 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
                /* WTF? "Valid values are '1' to MaxPorts" */
                return;
 
+       /* cache usb2 port capabilities */
+       if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
+               xhci->ext_caps[xhci->num_ext_caps++] = temp;
+
        /* Check the host's USB2 LPM capability */
        if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
                        (temp & XHCI_L1C)) {
@@ -2125,10 +2129,11 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
  */
 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 {
-       __le32 __iomem *addr;
-       u32 offset;
+       __le32 __iomem *addr, *tmp_addr;
+       u32 offset, tmp_offset;
        unsigned int num_ports;
        int i, j, port_index;
+       int cap_count = 0;
 
        addr = &xhci->cap_regs->hcc_params;
        offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
@@ -2161,13 +2166,32 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
         * See section 5.3.6 for offset calculation.
         */
        addr = &xhci->cap_regs->hc_capbase + offset;
+
+       tmp_addr = addr;
+       tmp_offset = offset;
+
+       /* count extended protocol capability entries for later caching */
+       do {
+               u32 cap_id;
+               cap_id = xhci_readl(xhci, tmp_addr);
+               if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
+                       cap_count++;
+               tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
+               tmp_addr += tmp_offset;
+       } while (tmp_offset);
+
+       xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
+       if (!xhci->ext_caps)
+               return -ENOMEM;
+
        while (1) {
                u32 cap_id;
 
                cap_id = xhci_readl(xhci, addr);
                if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
                        xhci_add_in_port(xhci, num_ports, addr,
-                                       (u8) XHCI_EXT_PORT_MAJOR(cap_id));
+                                       (u8) XHCI_EXT_PORT_MAJOR(cap_id),
+                                       cap_count);
                offset = XHCI_EXT_CAPS_NEXT(cap_id);
                if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
                                == num_ports)
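
The counting pass added above relies on each extended capability header carrying its ID in bits 7:0 and the offset to the next capability, in 32-bit words, in bits 15:8; since the address is a __le32 __iomem pointer, adding the offset advances one dword per unit. A rough stand-alone user-space sketch of the same walk over a made-up register image, with the field macros restated to match xhci-ext-caps.h:

#include <stdio.h>
#include <stdint.h>

#define XHCI_EXT_CAPS_ID(p)     ((p) & 0xff)
#define XHCI_EXT_CAPS_NEXT(p)   (((p) >> 8) & 0xff)
#define XHCI_EXT_CAPS_PROTOCOL  2

int main(void)
{
        /* hypothetical capability list: two Supported Protocol caps, then end */
        uint32_t regs[16] = {
                [0] = XHCI_EXT_CAPS_PROTOCOL | (4 << 8), /* next cap 4 dwords ahead */
                [4] = XHCI_EXT_CAPS_PROTOCOL | (0 << 8), /* last capability */
        };
        uint32_t *addr = regs;
        unsigned int offset, cap_count = 0;

        do {
                uint32_t cap_id = *addr;

                if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
                        cap_count++;
                offset = XHCI_EXT_CAPS_NEXT(cap_id);
                addr += offset;         /* pointer steps in 32-bit words */
        } while (offset);

        printf("protocol capabilities found: %u\n", cap_count); /* prints 2 */
        return 0;
}
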
index df90fe51b4aa2d406b8b2f8bfa591c410df89a35..51e22bf89505c2bcf360917b8088d5472ccd7ce6 100644 (file)
@@ -130,7 +130,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
                goto unmap_registers;
 
        /* USB 2.0 roothub is stored in the platform_device now. */
-       hcd = dev_get_drvdata(&pdev->dev);
+       hcd = platform_get_drvdata(pdev);
        xhci = hcd_to_xhci(hcd);
        xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
                        dev_name(&pdev->dev), hcd);
@@ -179,6 +179,7 @@ static int xhci_plat_remove(struct platform_device *dev)
 
        usb_remove_hcd(hcd);
        iounmap(hcd->regs);
+       release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
        usb_put_hcd(hcd);
        kfree(xhci);
 
index 1969c001b3f9a8bacbe926dfc4777c95206101a1..1e57eafa69101aaee797b7890020c66865ef4c16 100644 (file)
@@ -1424,6 +1424,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 */
                ctrl_ctx = xhci_get_input_control_ctx(xhci,
                                virt_dev->in_ctx);
+               if (!ctrl_ctx) {
+                       xhci_warn(xhci, "Could not get input context, bad type.\n");
+                       break;
+               }
                /* Input ctx add_flags are the endpoint index plus one */
                ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
                /* A usb_set_interface() call directly after clearing a halted
@@ -2799,7 +2803,7 @@ hw_died:
        return IRQ_HANDLED;
 }
 
-irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
+irqreturn_t xhci_msi_irq(int irq, void *hcd)
 {
        return xhci_irq(hcd);
 }
index d8f640b12dd9d950e842892858a617b7fa97247e..2c49f00260ca2ad9cd0e79bb3078dbd25bb864d6 100644 (file)
@@ -218,7 +218,7 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
                return ret;
        }
 
-       ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
+       ret = request_irq(pdev->irq, xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
        if (ret) {
                xhci_dbg(xhci, "disable MSI interrupt\n");
@@ -290,7 +290,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 
        for (i = 0; i < xhci->msix_count; i++) {
                ret = request_irq(xhci->msix_entries[i].vector,
-                               (irq_handler_t)xhci_msi_irq,
+                               xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
                if (ret)
                        goto disable_msix;
@@ -1121,6 +1121,16 @@ unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
        return index;
 }
 
+/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
+ * address from the XHCI endpoint index.
+ */
+unsigned int xhci_get_endpoint_address(unsigned int ep_index)
+{
+       unsigned int number = DIV_ROUND_UP(ep_index, 2);
+       unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
+       return direction | number;
+}
+
 /* Find the flag for this endpoint (for use in the control context).  Use the
  * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
  * bit 1, etc.
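
The helper added in this hunk is the inverse of xhci_get_endpoint_index(): index 1 maps back to endpoint address 0x01 (EP1 OUT), index 2 to 0x81 (EP1 IN), and so on, with index 0 coming back as 0x80 for the default control endpoint. A minimal user-space sketch of the same arithmetic, with USB_DIR_* and DIV_ROUND_UP spelled out rather than taken from kernel headers:

#include <stdio.h>

#define USB_DIR_OUT             0x00    /* as in <linux/usb/ch9.h> */
#define USB_DIR_IN              0x80
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* same arithmetic as the xhci_get_endpoint_address() helper above */
static unsigned int ep_index_to_address(unsigned int ep_index)
{
        unsigned int number = DIV_ROUND_UP(ep_index, 2);
        unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

        return direction | number;
}

int main(void)
{
        unsigned int i;

        /* 0 -> 0x80, 1 -> 0x01, 2 -> 0x81, 3 -> 0x02, 4 -> 0x82 */
        for (i = 0; i < 5; i++)
                printf("ep_index %u -> bEndpointAddress 0x%02x\n",
                       i, ep_index_to_address(i));
        return 0;
}
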
@@ -1225,19 +1235,25 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                                hw_max_packet_size);
                xhci_dbg(xhci, "Issuing evaluate context command.\n");
 
+               /* Set up the input context flags for the command */
+               /* FIXME: This won't work if a non-default control endpoint
+                * changes max packet sizes.
+                */
+               in_ctx = xhci->devs[slot_id]->in_ctx;
+               ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+               if (!ctrl_ctx) {
+                       xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                                       __func__);
+                       return -ENOMEM;
+               }
                /* Set up the modified control endpoint 0 */
                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                                xhci->devs[slot_id]->out_ctx, ep_index);
-               in_ctx = xhci->devs[slot_id]->in_ctx;
+
                ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
 
-               /* Set up the input context flags for the command */
-               /* FIXME: This won't work if a non-default control endpoint
-                * changes max packet sizes.
-                */
-               ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
                ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
                ctrl_ctx->drop_flags = 0;
 
@@ -1597,6 +1613,12 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+       if (!ctrl_ctx) {
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return 0;
+       }
+
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HC already knows the endpoint is disabled,
@@ -1691,8 +1713,13 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
        in_ctx = virt_dev->in_ctx;
        out_ctx = virt_dev->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
-       ep_index = xhci_get_endpoint_index(&ep->desc);
+       if (!ctrl_ctx) {
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return 0;
+       }
 
+       ep_index = xhci_get_endpoint_index(&ep->desc);
        /* If this endpoint is already in use, and the upper layers are trying
         * to add it again without dropping it, reject the addition.
         */
@@ -1765,12 +1792,18 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
        struct xhci_slot_ctx *slot_ctx;
        int i;
 
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+       if (!ctrl_ctx) {
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return;
+       }
+
        /* When a device's add flag and drop flag are zero, any subsequent
         * configure endpoint command will leave that endpoint's state
         * untouched.  Make sure we don't leave any old state in the input
         * endpoint contexts.
         */
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->drop_flags = 0;
        ctrl_ctx->add_flags = 0;
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
@@ -1877,13 +1910,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
 }
 
 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
-               struct xhci_container_ctx *in_ctx)
+               struct xhci_input_control_ctx *ctrl_ctx)
 {
-       struct xhci_input_control_ctx *ctrl_ctx;
        u32 valid_add_flags;
        u32 valid_drop_flags;
 
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        /* Ignore the slot flag (bit 0), and the default control endpoint flag
         * (bit 1).  The default control endpoint is added during the Address
         * Device command and is never removed until the slot is disabled.
@@ -1900,13 +1931,11 @@ static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
 }
 
 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
-               struct xhci_container_ctx *in_ctx)
+               struct xhci_input_control_ctx *ctrl_ctx)
 {
-       struct xhci_input_control_ctx *ctrl_ctx;
        u32 valid_add_flags;
        u32 valid_drop_flags;
 
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        valid_add_flags = ctrl_ctx->add_flags >> 2;
        valid_drop_flags = ctrl_ctx->drop_flags >> 2;
 
@@ -1928,11 +1957,11 @@ static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
  * Must be called with xhci->lock held.
  */
 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
-               struct xhci_container_ctx *in_ctx)
+               struct xhci_input_control_ctx *ctrl_ctx)
 {
        u32 added_eps;
 
-       added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+       added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
        if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
                xhci_dbg(xhci, "Not enough ep ctxs: "
                                "%u active, need to add %u, limit is %u.\n",
@@ -1953,11 +1982,11 @@ static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
  * Must be called with xhci->lock held.
  */
 static void xhci_free_host_resources(struct xhci_hcd *xhci,
-               struct xhci_container_ctx *in_ctx)
+               struct xhci_input_control_ctx *ctrl_ctx)
 {
        u32 num_failed_eps;
 
-       num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+       num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
        xhci->num_active_eps -= num_failed_eps;
        xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
                        num_failed_eps,
@@ -1971,11 +2000,11 @@ static void xhci_free_host_resources(struct xhci_hcd *xhci,
  * Must be called with xhci->lock held.
  */
 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
-               struct xhci_container_ctx *in_ctx)
+               struct xhci_input_control_ctx *ctrl_ctx)
 {
        u32 num_dropped_eps;
 
-       num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
+       num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
        xhci->num_active_eps -= num_dropped_eps;
        if (num_dropped_eps)
                xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
@@ -2470,6 +2499,11 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
                old_active_eps = virt_dev->tt_info->active_eps;
 
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+       if (!ctrl_ctx) {
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return -ENOMEM;
+       }
 
        for (i = 0; i < 31; i++) {
                if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
@@ -2554,6 +2588,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
        int timeleft;
        unsigned long flags;
        struct xhci_container_ctx *in_ctx;
+       struct xhci_input_control_ctx *ctrl_ctx;
        struct completion *cmd_completion;
        u32 *cmd_status;
        struct xhci_virt_device *virt_dev;
@@ -2566,9 +2601,16 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                in_ctx = command->in_ctx;
        else
                in_ctx = virt_dev->in_ctx;
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+       if (!ctrl_ctx) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return -ENOMEM;
+       }
 
        if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
-                       xhci_reserve_host_resources(xhci, in_ctx)) {
+                       xhci_reserve_host_resources(xhci, ctrl_ctx)) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_warn(xhci, "Not enough host resources, "
                                "active endpoint contexts = %u\n",
@@ -2578,7 +2620,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
        if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
                        xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
                if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
-                       xhci_free_host_resources(xhci, in_ctx);
+                       xhci_free_host_resources(xhci, ctrl_ctx);
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_warn(xhci, "Not enough bandwidth\n");
                return -ENOMEM;
@@ -2614,7 +2656,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                if (command)
                        list_del(&command->cmd_list);
                if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
-                       xhci_free_host_resources(xhci, in_ctx);
+                       xhci_free_host_resources(xhci, ctrl_ctx);
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
                return -ENOMEM;
@@ -2650,9 +2692,9 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                 * Otherwise, clean up the estimate to include dropped eps.
                 */
                if (ret)
-                       xhci_free_host_resources(xhci, in_ctx);
+                       xhci_free_host_resources(xhci, ctrl_ctx);
                else
-                       xhci_finish_resource_reservation(xhci, in_ctx);
+                       xhci_finish_resource_reservation(xhci, ctrl_ctx);
                spin_unlock_irqrestore(&xhci->lock, flags);
        }
        return ret;
@@ -2689,6 +2731,11 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 
        /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+       if (!ctrl_ctx) {
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return -ENOMEM;
+       }
        ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
        ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
        ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
@@ -2767,10 +2814,9 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx,
+               struct xhci_input_control_ctx *ctrl_ctx,
                u32 add_flags, u32 drop_flags)
 {
-       struct xhci_input_control_ctx *ctrl_ctx;
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ctrl_ctx->add_flags = cpu_to_le32(add_flags);
        ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
        xhci_slot_copy(xhci, in_ctx, out_ctx);
@@ -2784,14 +2830,22 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                struct xhci_dequeue_state *deq_state)
 {
+       struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_container_ctx *in_ctx;
        struct xhci_ep_ctx *ep_ctx;
        u32 added_ctxs;
        dma_addr_t addr;
 
+       in_ctx = xhci->devs[slot_id]->in_ctx;
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+       if (!ctrl_ctx) {
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return;
+       }
+
        xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                        xhci->devs[slot_id]->out_ctx, ep_index);
-       in_ctx = xhci->devs[slot_id]->in_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
        addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
                        deq_state->new_deq_ptr);
@@ -2807,7 +2861,8 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 
        added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
        xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
-                       xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
+                       xhci->devs[slot_id]->out_ctx, ctrl_ctx,
+                       added_ctxs, added_ctxs);
 }
 
 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
@@ -3065,6 +3120,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
        struct xhci_hcd *xhci;
        struct xhci_virt_device *vdev;
        struct xhci_command *config_cmd;
+       struct xhci_input_control_ctx *ctrl_ctx;
        unsigned int ep_index;
        unsigned int num_stream_ctxs;
        unsigned long flags;
@@ -3086,6 +3142,13 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
                xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
                return -ENOMEM;
        }
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
+       if (!ctrl_ctx) {
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               xhci_free_command(xhci, config_cmd);
+               return -ENOMEM;
+       }
 
        /* Check to make sure all endpoints are not already configured for
         * streams.  While we're at it, find the maximum number of streams that
@@ -3152,7 +3215,8 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
         * and add the updated copy from the input context.
         */
        xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
-                       vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+                       vdev->out_ctx, ctrl_ctx,
+                       changed_ep_bitmask, changed_ep_bitmask);
 
        /* Issue and wait for the configure endpoint command */
        ret = xhci_configure_endpoint(xhci, udev, config_cmd,
@@ -3210,6 +3274,7 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
        struct xhci_hcd *xhci;
        struct xhci_virt_device *vdev;
        struct xhci_command *command;
+       struct xhci_input_control_ctx *ctrl_ctx;
        unsigned int ep_index;
        unsigned long flags;
        u32 changed_ep_bitmask;
@@ -3232,6 +3297,14 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
         */
        ep_index = xhci_get_endpoint_index(&eps[0]->desc);
        command = vdev->eps[ep_index].stream_info->free_streams_command;
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+       if (!ctrl_ctx) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return -EINVAL;
+       }
+
        for (i = 0; i < num_eps; i++) {
                struct xhci_ep_ctx *ep_ctx;
 
@@ -3246,7 +3319,8 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
                                &vdev->eps[ep_index]);
        }
        xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
-                       vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+                       vdev->out_ctx, ctrl_ctx,
+                       changed_ep_bitmask, changed_ep_bitmask);
        spin_unlock_irqrestore(&xhci->lock, flags);
 
        /* Issue and wait for the configure endpoint command,
@@ -3686,6 +3760,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        }
 
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+       if (!ctrl_ctx) {
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return -EINVAL;
+       }
        /*
         * If this is the first Set Address since device plug-in or
         * virt_device reallocation after a resume with an xHCI power loss,
@@ -3696,7 +3776,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        /* Otherwise, update the control endpoint ring enqueue pointer. */
        else
                xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
        ctrl_ctx->drop_flags = 0;
 
@@ -3815,6 +3894,63 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
        return raw_port;
 }
 
+/*
+ * Issue an Evaluate Context command to change the Maximum Exit Latency in the
+ * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
+ */
+static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
+                       struct usb_device *udev, u16 max_exit_latency)
+{
+       struct xhci_virt_device *virt_dev;
+       struct xhci_command *command;
+       struct xhci_input_control_ctx *ctrl_ctx;
+       struct xhci_slot_ctx *slot_ctx;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&xhci->lock, flags);
+       if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               return 0;
+       }
+
+       /* Attempt to issue an Evaluate Context command to change the MEL. */
+       virt_dev = xhci->devs[udev->slot_id];
+       command = xhci->lpm_command;
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+       if (!ctrl_ctx) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               return -ENOMEM;
+       }
+
+       xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
+       ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+       slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
+       slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
+       slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
+
+       xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
+       xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
+       xhci_dbg_ctx(xhci, command->in_ctx, 0);
+
+       /* Issue and wait for the evaluate context command. */
+       ret = xhci_configure_endpoint(xhci, udev, command,
+                       true, true);
+       xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
+       xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
+
+       if (!ret) {
+               spin_lock_irqsave(&xhci->lock, flags);
+               virt_dev->current_mel = max_exit_latency;
+               spin_unlock_irqrestore(&xhci->lock, flags);
+       }
+       return ret;
+}
+
 #ifdef CONFIG_PM_RUNTIME
 
 /* BESL to HIRD Encoding array for USB2 LPM */
@@ -3856,6 +3992,28 @@ static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
        return besl;
 }
 
+/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
+static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
+{
+       u32 field;
+       int l1;
+       int besld = 0;
+       int hirdm = 0;
+
+       field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
+
+       /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
+       l1 = udev->l1_params.timeout / 256;
+
+       /* device has preferred BESLD */
+       if (field & USB_BESL_DEEP_VALID) {
+               besld = USB_GET_BESL_DEEP(field);
+               hirdm = 1;
+       }
+
+       return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
+}
+
 static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
                                        struct usb_device *udev)
 {
@@ -3911,7 +4069,7 @@ static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
         * Check device's USB 2.0 extension descriptor to determine whether
         * HIRD or BESL should be used. See USB 2.0 LPM errata.
         */
-       pm_addr = port_array[port_num] + 1;
+       pm_addr = port_array[port_num] + PORTPMSC;
        hird = xhci_calculate_hird_besl(xhci, udev);
        temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
        xhci_writel(xhci, temp, pm_addr);
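
For a concrete feel of the PORTHLPMC value that the xhci_calculate_usb2_hw_lpm_params() helper above produces: with the 512 us default L1 timeout (XHCI_L1_TIMEOUT, added to xhci.h further down) and a device advertising a deep BESL of 4, the packed register value works out to 0x1009. A small sketch of that arithmetic, restating the packing macros from the xhci.h hunk below; the BESL value is a hypothetical device preference:

#include <stdio.h>

/* field packing as in the xhci.h hunk below */
#define PORT_HIRDM(p)           ((p) & 3)
#define PORT_L1_TIMEOUT(p)      (((p) & 0xff) << 2)
#define PORT_BESLD(p)           (((p) & 0xf) << 10)

int main(void)
{
        unsigned int l1 = 512 / 256;    /* L1 timeout in 256 us steps -> 2 */
        unsigned int besld = 4;         /* hypothetical device-preferred deep BESL */
        unsigned int hirdm = 1;         /* BESLD is valid, so select BESL mode */
        unsigned int val = PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) |
                           PORT_HIRDM(hirdm);

        printf("PORTHLPMC = 0x%04x\n", val);    /* prints 0x1009 */
        return 0;
}
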
@@ -3988,11 +4146,12 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        __le32 __iomem  **port_array;
-       __le32 __iomem  *pm_addr;
-       u32             temp;
+       __le32 __iomem  *pm_addr, *hlpm_addr;
+       u32             pm_val, hlpm_val, field;
        unsigned int    port_num;
        unsigned long   flags;
-       int             hird;
+       int             hird, exit_latency;
+       int             ret;
 
        if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
                        !udev->lpm_capable)
@@ -4009,40 +4168,120 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
 
        port_array = xhci->usb2_ports;
        port_num = udev->portnum - 1;
-       pm_addr = port_array[port_num] + 1;
-       temp = xhci_readl(xhci, pm_addr);
+       pm_addr = port_array[port_num] + PORTPMSC;
+       pm_val = xhci_readl(xhci, pm_addr);
+       hlpm_addr = port_array[port_num] + PORTHLPMC;
+       field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
 
        xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
                        enable ? "enable" : "disable", port_num);
 
-       hird = xhci_calculate_hird_besl(xhci, udev);
-
        if (enable) {
-               temp &= ~PORT_HIRD_MASK;
-               temp |= PORT_HIRD(hird) | PORT_RWE;
-               xhci_writel(xhci, temp, pm_addr);
-               temp = xhci_readl(xhci, pm_addr);
-               temp |= PORT_HLE;
-               xhci_writel(xhci, temp, pm_addr);
+               /* Host supports BESL timeout instead of HIRD */
+               if (udev->usb2_hw_lpm_besl_capable) {
+                       /* if device doesn't have a preferred BESL value use a
+                        * default one which works with mixed HIRD and BESL
+                        * systems. See XHCI_DEFAULT_BESL definition in xhci.h
+                        */
+                       if ((field & USB_BESL_SUPPORT) &&
+                           (field & USB_BESL_BASELINE_VALID))
+                               hird = USB_GET_BESL_BASELINE(field);
+                       else
+                               hird = udev->l1_params.besl;
+
+                       exit_latency = xhci_besl_encoding[hird];
+                       spin_unlock_irqrestore(&xhci->lock, flags);
+
+                       /* USB 3.0 code dedicates one xhci->lpm_command->in_ctx
+                        * input context for link power management evaluate
+                        * context commands. It is protected by hcd->bandwidth
+                        * mutex and is shared by all devices. We need to set
+                        * the max exit latency in USB 2 BESL LPM as well, so
+                        * use the same mutex and xhci_change_max_exit_latency()
+                        */
+                       mutex_lock(hcd->bandwidth_mutex);
+                       ret = xhci_change_max_exit_latency(xhci, udev,
+                                                          exit_latency);
+                       mutex_unlock(hcd->bandwidth_mutex);
+
+                       if (ret < 0)
+                               return ret;
+                       spin_lock_irqsave(&xhci->lock, flags);
+
+                       hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
+                       xhci_writel(xhci, hlpm_val, hlpm_addr);
+                       /* flush write */
+                       xhci_readl(xhci, hlpm_addr);
+               } else {
+                       hird = xhci_calculate_hird_besl(xhci, udev);
+               }
+
+               pm_val &= ~PORT_HIRD_MASK;
+               pm_val |= PORT_HIRD(hird) | PORT_RWE;
+               xhci_writel(xhci, pm_val, pm_addr);
+               pm_val = xhci_readl(xhci, pm_addr);
+               pm_val |= PORT_HLE;
+               xhci_writel(xhci, pm_val, pm_addr);
+               /* flush write */
+               xhci_readl(xhci, pm_addr);
        } else {
-               temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
-               xhci_writel(xhci, temp, pm_addr);
+               pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
+               xhci_writel(xhci, pm_val, pm_addr);
+               /* flush write */
+               xhci_readl(xhci, pm_addr);
+               if (udev->usb2_hw_lpm_besl_capable) {
+                       spin_unlock_irqrestore(&xhci->lock, flags);
+                       mutex_lock(hcd->bandwidth_mutex);
+                       xhci_change_max_exit_latency(xhci, udev, 0);
+                       mutex_unlock(hcd->bandwidth_mutex);
+                       return 0;
+               }
        }
 
        spin_unlock_irqrestore(&xhci->lock, flags);
        return 0;
 }
 
+/* Check whether a usb2 port supports a given extended capability protocol.
+ * Only USB2 port extended protocol capability values are cached.
+ * Return 1 if the capability is supported.
+ */
+static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
+                                          unsigned capability)
+{
+       u32 port_offset, port_count;
+       int i;
+
+       for (i = 0; i < xhci->num_ext_caps; i++) {
+               if (xhci->ext_caps[i] & capability) {
+                       /* port offsets start at 1 */
+                       port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
+                       port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
+                       if (port >= port_offset &&
+                           port < port_offset + port_count)
+                               return 1;
+               }
+       }
+       return 0;
+}
+
 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int             ret;
+       int             portnum = udev->portnum - 1;
 
        ret = xhci_usb2_software_lpm_test(hcd, udev);
        if (!ret) {
                xhci_dbg(xhci, "software LPM test succeeded\n");
-               if (xhci->hw_lpm_support == 1) {
+               if (xhci->hw_lpm_support == 1 &&
+                   xhci_check_usb2_port_capability(xhci, portnum, XHCI_HLC)) {
                        udev->usb2_hw_lpm_capable = 1;
+                       udev->l1_params.timeout = XHCI_L1_TIMEOUT;
+                       udev->l1_params.besl = XHCI_DEFAULT_BESL;
+                       if (xhci_check_usb2_port_capability(xhci, portnum,
+                                                           XHCI_BLC))
+                               udev->usb2_hw_lpm_besl_capable = 1;
                        ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
                        if (!ret)
                                udev->usb2_hw_lpm_enabled = 1;
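
xhci_check_usb2_port_capability() above tests the zero-based root-hub port number against the port range carried in each cached capability dword (compatible port offset in bits 7:0, port count in bits 15:8, both one-based). A small stand-alone sketch of that range test against a made-up capability word covering ports 1 and 2:

#include <stdio.h>
#include <stdint.h>

/* field extraction as in xhci-ext-caps.h; BLC bit per xHCI 1.0 sec 7.2.2.1.3.2 */
#define XHCI_EXT_PORT_OFF(x)    ((x) & 0xff)
#define XHCI_EXT_PORT_COUNT(x)  (((x) >> 8) & 0xff)
#define XHCI_BLC                (1 << 20)

int main(void)
{
        /* hypothetical USB2 protocol capability: ports 1..2, BESL capable */
        uint32_t cap = XHCI_BLC | (2 << 8) | 1;
        unsigned int off = XHCI_EXT_PORT_OFF(cap) - 1;  /* offsets start at 1 */
        unsigned int cnt = XHCI_EXT_PORT_COUNT(cap);
        int port;

        for (port = 0; port < 4; port++) {
                int hit = (cap & XHCI_BLC) &&
                          port >= (int)off && port < (int)(off + cnt);

                printf("port %d: BESL %ssupported\n", port, hit ? "" : "not ");
        }
        return 0;
}
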
@@ -4373,56 +4612,6 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
        return timeout;
 }
 
-/*
- * Issue an Evaluate Context command to change the Maximum Exit Latency in the
- * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
- */
-static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
-                       struct usb_device *udev, u16 max_exit_latency)
-{
-       struct xhci_virt_device *virt_dev;
-       struct xhci_command *command;
-       struct xhci_input_control_ctx *ctrl_ctx;
-       struct xhci_slot_ctx *slot_ctx;
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&xhci->lock, flags);
-       if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
-               spin_unlock_irqrestore(&xhci->lock, flags);
-               return 0;
-       }
-
-       /* Attempt to issue an Evaluate Context command to change the MEL. */
-       virt_dev = xhci->devs[udev->slot_id];
-       command = xhci->lpm_command;
-       xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
-       spin_unlock_irqrestore(&xhci->lock, flags);
-
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
-       ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
-       slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
-       slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
-       slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
-
-       xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
-       xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
-       xhci_dbg_ctx(xhci, command->in_ctx, 0);
-
-       /* Issue and wait for the evaluate context command. */
-       ret = xhci_configure_endpoint(xhci, udev, command,
-                       true, true);
-       xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
-       xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
-
-       if (!ret) {
-               spin_lock_irqsave(&xhci->lock, flags);
-               virt_dev->current_mel = max_exit_latency;
-               spin_unlock_irqrestore(&xhci->lock, flags);
-       }
-       return ret;
-}
-
 static int calculate_max_exit_latency(struct usb_device *udev,
                enum usb3_link_state state_changed,
                u16 hub_encoded_timeout)
@@ -4564,6 +4753,13 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
                xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
                return -ENOMEM;
        }
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
+       if (!ctrl_ctx) {
+               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+                               __func__);
+               xhci_free_command(xhci, config_cmd);
+               return -ENOMEM;
+       }
 
        spin_lock_irqsave(&xhci->lock, flags);
        if (hdev->speed == USB_SPEED_HIGH &&
@@ -4575,7 +4771,6 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
        }
 
        xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
        ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
        slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
        slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
index 77600cefcaf1df6ed3209a41c2a00dcdbb31b4dc..c338741a675d1e254b590e81810f9b48db943f15 100644 (file)
@@ -132,6 +132,11 @@ struct xhci_cap_regs {
 /* Number of registers per port */
 #define        NUM_PORT_REGS   4
 
+#define PORTSC         0
+#define PORTPMSC       1
+#define PORTLI         2
+#define PORTHLPMC      3
+
 /**
  * struct xhci_op_regs - xHCI Host Controller Operational Registers.
  * @command:           USBCMD - xHC command register
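
The new PORTSC/PORTPMSC/PORTLI/PORTHLPMC names replace the bare "+ 1" offsets used in the earlier hunks of this series: each root-hub port owns NUM_PORT_REGS consecutive 32-bit registers, and the driver's port_array[] entries point at each port's PORTSC, so the other three registers sit at the following dword offsets. A tiny sketch of the resulting byte addresses for a made-up operational-register base of 0x400:

#include <stdio.h>

/* per-port register layout as named in the hunk above */
#define NUM_PORT_REGS   4
#define PORTSC          0
#define PORTPMSC        1
#define PORTLI          2
#define PORTHLPMC       3

int main(void)
{
        /* hypothetical: first port register set at byte offset 0x400 */
        unsigned int port_base = 0x400;
        unsigned int port;

        for (port = 0; port < 2; port++) {
                unsigned int portsc = port_base + 4 * NUM_PORT_REGS * port;

                printf("port %u: PORTSC %#x PORTPMSC %#x PORTLI %#x PORTHLPMC %#x\n",
                       port, portsc + 4 * PORTSC, portsc + 4 * PORTPMSC,
                       portsc + 4 * PORTLI, portsc + 4 * PORTHLPMC);
        }
        return 0;
}
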
@@ -381,6 +386,27 @@ struct xhci_op_regs {
 #define        PORT_L1DS(p)            (((p) & 0xff) << 8)
 #define        PORT_HLE                (1 << 16)
 
+
+/* USB2 Protocol PORTHLPMC */
+#define PORT_HIRDM(p)((p) & 3)
+#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
+#define PORT_BESLD(p)(((p) & 0xf) << 10)
+
+/* use 512 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT                512
+
+/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+ * by other operating systems.
+ *
+ * XHCI 1.0 errata 8/14/12 Table 13 notes:
+ * "Software should choose xHC BESL/BESLD field values that do not violate a
+ * device's resume latency requirements,
+ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+ * or not program values < '4' if BLC = '0' and a BESL device is attached."
+ */
+#define XHCI_DEFAULT_BESL      4
+
 /**
  * struct xhci_intr_reg - Interrupt Register Set
  * @irq_pending:       IMAN - Interrupt Management Register.  Used to enable
@@ -1532,6 +1558,9 @@ struct xhci_hcd {
        unsigned                sw_lpm_support:1;
        /* support xHCI 1.0 spec USB2 hardware LPM */
        unsigned                hw_lpm_support:1;
+       /* cached usb2 extended protocol capabilities */
+       u32                     *ext_caps;
+       unsigned int            num_ext_caps;
        /* Compliance Mode Recovery Data */
        struct timer_list       comp_mode_recovery_timer;
        u32                     port_status_u0;
@@ -1641,6 +1670,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
                struct usb_device *udev);
 unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_address(unsigned int ep_index);
 unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
 unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
 unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
@@ -1745,7 +1775,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
 
 int xhci_get_frame(struct usb_hcd *hcd);
 irqreturn_t xhci_irq(struct usb_hcd *hcd);
-irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd);
+irqreturn_t xhci_msi_irq(int irq, void *hcd);
 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
index 284b85461410a82129ed43d7082953d8cb81849e..eb3c8c142fa988de1aaaee07d036e93554bb48a3 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #ifdef CONFIG_USB_DEBUG
 static int debug = 5;
@@ -35,8 +35,8 @@ static int debug = 1;
 
 /* Use our own dbg macro */
 #undef dbg
-#define dbg(lvl, format, arg...)                                       \
-do {                                                                   \
+#define dbg(lvl, format, arg...)       \
+do {                                                           \
        if (debug >= lvl)                                               \
                printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
 } while (0)
@@ -58,12 +58,12 @@ MODULE_PARM_DESC(debug, "Debug enabled or not");
 /* table of devices that work with this driver */
 static const struct usb_device_id device_table[] = {
        { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) },          /* ADU100 */
-       { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) },       /* ADU120 */
-       { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) },       /* ADU130 */
+       { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) },       /* ADU120 */
+       { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) },       /* ADU130 */
        { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+100) },      /* ADU200 */
        { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+108) },      /* ADU208 */
        { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+118) },      /* ADU218 */
-       { }/* Terminating entry */
+       { } /* Terminating entry */
 };
 
 MODULE_DEVICE_TABLE(usb, device_table);
@@ -92,16 +92,16 @@ MODULE_DEVICE_TABLE(usb, device_table);
 /* Structure to hold all of our device specific stuff */
 struct adu_device {
        struct mutex            mtx;
-       struct usb_device*      udev; /* save off the usb device pointer */
-       struct usb_interface*   interface;
+       struct usb_device *udev; /* save off the usb device pointer */
+       struct usb_interface *interface;
        unsigned int            minor; /* the starting minor number for this device */
        char                    serial_number[8];
 
        int                     open_count; /* number of times this port has been opened */
 
-       char*                   read_buffer_primary;
+       char            *read_buffer_primary;
        int                     read_buffer_length;
-       char*                   read_buffer_secondary;
+       char            *read_buffer_secondary;
        int                     secondary_head;
        int                     secondary_tail;
        spinlock_t              buflock;
@@ -109,14 +109,14 @@ struct adu_device {
        wait_queue_head_t       read_wait;
        wait_queue_head_t       write_wait;
 
-       char*                   interrupt_in_buffer;
-       struct usb_endpoint_descriptor* interrupt_in_endpoint;
-       struct urb*             interrupt_in_urb;
+       char            *interrupt_in_buffer;
+       struct usb_endpoint_descriptor *interrupt_in_endpoint;
+       struct urb      *interrupt_in_urb;
        int                     read_urb_finished;
 
-       char*                   interrupt_out_buffer;
-       struct usb_endpoint_descriptor* interrupt_out_endpoint;
-       struct urb*             interrupt_out_urb;
+       char            *interrupt_out_buffer;
+       struct usb_endpoint_descriptor *interrupt_out_endpoint;
+       struct urb      *interrupt_out_urb;
        int                     out_urb_finished;
 };
 
@@ -147,10 +147,10 @@ static void adu_abort_transfers(struct adu_device *dev)
 {
        unsigned long flags;
 
-       dbg(2," %s : enter", __func__);
+       dbg(2, " %s : enter", __func__);
 
        if (dev->udev == NULL) {
-               dbg(1," %s : udev is null", __func__);
+               dbg(1, " %s : udev is null", __func__);
                goto exit;
        }
 
@@ -172,7 +172,7 @@ static void adu_abort_transfers(struct adu_device *dev)
                spin_unlock_irqrestore(&dev->buflock, flags);
 
 exit:
-       dbg(2," %s : leave", __func__);
+       dbg(2, " %s : leave", __func__);
 }
 
 static void adu_delete(struct adu_device *dev)
@@ -196,7 +196,7 @@ static void adu_interrupt_in_callback(struct urb *urb)
        struct adu_device *dev = urb->context;
        int status = urb->status;
 
-       dbg(4," %s : enter, status %d", __func__, status);
+       dbg(4, " %s : enter, status %d", __func__, status);
        adu_debug_data(5, __func__, urb->actual_length,
                       urb->transfer_buffer);
 
@@ -205,7 +205,7 @@ static void adu_interrupt_in_callback(struct urb *urb)
        if (status != 0) {
                if ((status != -ENOENT) && (status != -ECONNRESET) &&
                        (status != -ESHUTDOWN)) {
-                       dbg(1," %s : nonzero status received: %d",
+                       dbg(1, " %s : nonzero status received: %d",
                            __func__, status);
                }
                goto exit;
@@ -220,10 +220,10 @@ static void adu_interrupt_in_callback(struct urb *urb)
                                dev->interrupt_in_buffer, urb->actual_length);
 
                        dev->read_buffer_length += urb->actual_length;
-                       dbg(2," %s reading  %d ", __func__,
+                       dbg(2, " %s reading  %d ", __func__,
                            urb->actual_length);
                } else {
-                       dbg(1," %s : read_buffer overflow", __func__);
+                       dbg(1, " %s : read_buffer overflow", __func__);
                }
        }
 
@@ -234,7 +234,7 @@ exit:
        wake_up_interruptible(&dev->read_wait);
        adu_debug_data(5, __func__, urb->actual_length,
                       urb->transfer_buffer);
-       dbg(4," %s : leave, status %d", __func__, status);
+       dbg(4, " %s : leave, status %d", __func__, status);
 }
 
 static void adu_interrupt_out_callback(struct urb *urb)
@@ -242,8 +242,8 @@ static void adu_interrupt_out_callback(struct urb *urb)
        struct adu_device *dev = urb->context;
        int status = urb->status;
 
-       dbg(4," %s : enter, status %d", __func__, status);
-       adu_debug_data(5,__func__, urb->actual_length, urb->transfer_buffer);
+       dbg(4, " %s : enter, status %d", __func__, status);
+       adu_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer);
 
        if (status != 0) {
                if ((status != -ENOENT) &&
@@ -262,7 +262,7 @@ exit:
 
        adu_debug_data(5, __func__, urb->actual_length,
                       urb->transfer_buffer);
-       dbg(4," %s : leave, status %d", __func__, status);
+       dbg(4, " %s : leave, status %d", __func__, status);
 }
 
 static int adu_open(struct inode *inode, struct file *file)
@@ -272,11 +272,12 @@ static int adu_open(struct inode *inode, struct file *file)
        int subminor;
        int retval;
 
-       dbg(2,"%s : enter", __func__);
+       dbg(2, "%s : enter", __func__);
 
        subminor = iminor(inode);
 
-       if ((retval = mutex_lock_interruptible(&adutux_mutex))) {
+       retval = mutex_lock_interruptible(&adutux_mutex);
+       if (retval) {
                dbg(2, "%s : mutex lock failed", __func__);
                goto exit_no_lock;
        }
@@ -302,7 +303,7 @@ static int adu_open(struct inode *inode, struct file *file)
        }
 
        ++dev->open_count;
-       dbg(2,"%s : open count %d", __func__, dev->open_count);
+       dbg(2, "%s : open count %d", __func__, dev->open_count);
 
        /* save device in the file's private structure */
        file->private_data = dev;
@@ -311,7 +312,7 @@ static int adu_open(struct inode *inode, struct file *file)
        dev->read_buffer_length = 0;
 
        /* fixup first read by having urb waiting for it */
-       usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
+       usb_fill_int_urb(dev->interrupt_in_urb, dev->udev,
                         usb_rcvintpipe(dev->udev,
                                        dev->interrupt_in_endpoint->bEndpointAddress),
                         dev->interrupt_in_buffer,
@@ -332,23 +333,23 @@ static int adu_open(struct inode *inode, struct file *file)
 exit_no_device:
        mutex_unlock(&adutux_mutex);
 exit_no_lock:
-       dbg(2,"%s : leave, return value %d ", __func__, retval);
+       dbg(2, "%s : leave, return value %d ", __func__, retval);
        return retval;
 }
 
 static void adu_release_internal(struct adu_device *dev)
 {
-       dbg(2," %s : enter", __func__);
+       dbg(2, " %s : enter", __func__);
 
        /* decrement our usage count for the device */
        --dev->open_count;
-       dbg(2," %s : open count %d", __func__, dev->open_count);
+       dbg(2, " %s : open count %d", __func__, dev->open_count);
        if (dev->open_count <= 0) {
                adu_abort_transfers(dev);
                dev->open_count = 0;
        }
 
-       dbg(2," %s : leave", __func__);
+       dbg(2, " %s : leave", __func__);
 }
 
 static int adu_release(struct inode *inode, struct file *file)
@@ -356,17 +357,17 @@ static int adu_release(struct inode *inode, struct file *file)
        struct adu_device *dev;
        int retval = 0;
 
-       dbg(2," %s : enter", __func__);
+       dbg(2, " %s : enter", __func__);
 
        if (file == NULL) {
-               dbg(1," %s : file is NULL", __func__);
+               dbg(1, " %s : file is NULL", __func__);
                retval = -ENODEV;
                goto exit;
        }
 
        dev = file->private_data;
        if (dev == NULL) {
-               dbg(1," %s : object is NULL", __func__);
+               dbg(1, " %s : object is NULL", __func__);
                retval = -ENODEV;
                goto exit;
        }
@@ -374,7 +375,7 @@ static int adu_release(struct inode *inode, struct file *file)
        mutex_lock(&adutux_mutex); /* not interruptible */
 
        if (dev->open_count <= 0) {
-               dbg(1," %s : device not opened", __func__);
+               dbg(1, " %s : device not opened", __func__);
                retval = -ENODEV;
                goto unlock;
        }
@@ -388,7 +389,7 @@ static int adu_release(struct inode *inode, struct file *file)
 unlock:
        mutex_unlock(&adutux_mutex);
 exit:
-       dbg(2," %s : leave, return value %d", __func__, retval);
+       dbg(2, " %s : leave, return value %d", __func__, retval);
        return retval;
 }
 
@@ -405,10 +406,10 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
        unsigned long flags;
        DECLARE_WAITQUEUE(wait, current);
 
-       dbg(2," %s : enter, count = %Zd, file=%p", __func__, count, file);
+       dbg(2, " %s : enter, count = %Zd, file=%p", __func__, count, file);
 
        dev = file->private_data;
-       dbg(2," %s : dev=%p", __func__, dev);
+       dbg(2, " %s : dev=%p", __func__, dev);
 
        if (mutex_lock_interruptible(&dev->mtx))
                return -ERESTARTSYS;
@@ -423,15 +424,15 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
 
        /* verify that some data was requested */
        if (count == 0) {
-               dbg(1," %s : read request of 0 bytes", __func__);
+               dbg(1, " %s : read request of 0 bytes", __func__);
                goto exit;
        }
 
        timeout = COMMAND_TIMEOUT;
-       dbg(2," %s : about to start looping", __func__);
+       dbg(2, " %s : about to start looping", __func__);
        while (bytes_to_read) {
                int data_in_secondary = dev->secondary_tail - dev->secondary_head;
-               dbg(2," %s : while, data_in_secondary=%d, status=%d",
+               dbg(2, " %s : while, data_in_secondary=%d, status=%d",
                    __func__, data_in_secondary,
                    dev->interrupt_in_urb->status);
 
@@ -456,7 +457,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                        if (dev->read_buffer_length) {
                                /* we secure access to the primary */
                                char *tmp;
-                               dbg(2," %s : swap, read_buffer_length = %d",
+                               dbg(2, " %s : swap, read_buffer_length = %d",
                                    __func__, dev->read_buffer_length);
                                tmp = dev->read_buffer_secondary;
                                dev->read_buffer_secondary = dev->read_buffer_primary;
@@ -472,16 +473,16 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                                if (!dev->read_urb_finished) {
                                        /* somebody is doing IO */
                                        spin_unlock_irqrestore(&dev->buflock, flags);
-                                       dbg(2," %s : submitted already", __func__);
+                                       dbg(2, " %s : submitted already", __func__);
                                } else {
                                        /* we must initiate input */
-                                       dbg(2," %s : initiate input", __func__);
+                                       dbg(2, " %s : initiate input", __func__);
                                        dev->read_urb_finished = 0;
                                        spin_unlock_irqrestore(&dev->buflock, flags);
 
-                                       usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
-                                                        usb_rcvintpipe(dev->udev,
-                                                                       dev->interrupt_in_endpoint->bEndpointAddress),
+                                       usb_fill_int_urb(dev->interrupt_in_urb, dev->udev,
+                                                       usb_rcvintpipe(dev->udev,
+                                                               dev->interrupt_in_endpoint->bEndpointAddress),
                                                         dev->interrupt_in_buffer,
                                                         usb_endpoint_maxp(dev->interrupt_in_endpoint),
                                                         adu_interrupt_in_callback,
@@ -493,7 +494,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                                                if (retval == -ENOMEM) {
                                                        retval = bytes_read ? bytes_read : -ENOMEM;
                                                }
-                                               dbg(2," %s : submit failed", __func__);
+                                               dbg(2, " %s : submit failed", __func__);
                                                goto exit;
                                        }
                                }
@@ -512,13 +513,13 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                                remove_wait_queue(&dev->read_wait, &wait);
 
                                if (timeout <= 0) {
-                                       dbg(2," %s : timeout", __func__);
+                                       dbg(2, " %s : timeout", __func__);
                                        retval = bytes_read ? bytes_read : -ETIMEDOUT;
                                        goto exit;
                                }
 
                                if (signal_pending(current)) {
-                                       dbg(2," %s : signal pending", __func__);
+                                       dbg(2, " %s : signal pending", __func__);
                                        retval = bytes_read ? bytes_read : -EINTR;
                                        goto exit;
                                }
@@ -532,9 +533,9 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
        if (should_submit && dev->read_urb_finished) {
                dev->read_urb_finished = 0;
                spin_unlock_irqrestore(&dev->buflock, flags);
-               usb_fill_int_urb(dev->interrupt_in_urb,dev->udev,
+               usb_fill_int_urb(dev->interrupt_in_urb, dev->udev,
                                 usb_rcvintpipe(dev->udev,
-                                               dev->interrupt_in_endpoint->bEndpointAddress),
+                                       dev->interrupt_in_endpoint->bEndpointAddress),
                                dev->interrupt_in_buffer,
                                usb_endpoint_maxp(dev->interrupt_in_endpoint),
                                adu_interrupt_in_callback,
@@ -551,7 +552,7 @@ exit:
        /* unlock the device */
        mutex_unlock(&dev->mtx);
 
-       dbg(2," %s : leave, return value %d", __func__, retval);
+       dbg(2, " %s : leave, return value %d", __func__, retval);
        return retval;
 }
 
@@ -566,7 +567,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
        unsigned long flags;
        int retval;
 
-       dbg(2," %s : enter, count = %Zd", __func__, count);
+       dbg(2, " %s : enter, count = %Zd", __func__, count);
 
        dev = file->private_data;
 
@@ -584,7 +585,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
 
        /* verify that we actually have some data to write */
        if (count == 0) {
-               dbg(1," %s : write request of 0 bytes", __func__);
+               dbg(1, " %s : write request of 0 bytes", __func__);
                goto exit;
        }
 
@@ -597,7 +598,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
 
                        mutex_unlock(&dev->mtx);
                        if (signal_pending(current)) {
-                               dbg(1," %s : interrupted", __func__);
+                               dbg(1, " %s : interrupted", __func__);
                                set_current_state(TASK_RUNNING);
                                retval = -EINTR;
                                goto exit_onqueue;
@@ -614,17 +615,17 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
                                goto exit_nolock;
                        }
 
-                       dbg(4," %s : in progress, count = %Zd", __func__, count);
+                       dbg(4, " %s : in progress, count = %Zd", __func__, count);
                } else {
                        spin_unlock_irqrestore(&dev->buflock, flags);
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&dev->write_wait, &waita);
-                       dbg(4," %s : sending, count = %Zd", __func__, count);
+                       dbg(4, " %s : sending, count = %Zd", __func__, count);
 
                        /* write the data into interrupt_out_buffer from userspace */
                        buffer_size = usb_endpoint_maxp(dev->interrupt_out_endpoint);
                        bytes_to_write = count > buffer_size ? buffer_size : count;
-                       dbg(4," %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd",
+                       dbg(4, " %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd",
                            __func__, buffer_size, count, bytes_to_write);
 
                        if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) {
@@ -664,7 +665,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
 exit:
        mutex_unlock(&dev->mtx);
 exit_nolock:
-       dbg(2," %s : leave, return value %d", __func__, retval);
+       dbg(2, " %s : leave, return value %d", __func__, retval);
        return retval;
 
 exit_onqueue:
@@ -710,7 +711,7 @@ static int adu_probe(struct usb_interface *interface,
        int out_end_size;
        int i;
 
-       dbg(2," %s : enter", __func__);
+       dbg(2, " %s : enter", __func__);
 
        if (udev == NULL) {
                dev_err(&interface->dev, "udev is NULL.\n");
@@ -811,7 +812,7 @@ static int adu_probe(struct usb_interface *interface,
                dev_err(&interface->dev, "Could not retrieve serial number\n");
                goto error;
        }
-       dbg(2," %s : serial_number=%s", __func__, dev->serial_number);
+       dbg(2, " %s : serial_number=%s", __func__, dev->serial_number);
 
        /* we can register the device now, as it is ready */
        usb_set_intfdata(interface, dev);
@@ -832,7 +833,7 @@ static int adu_probe(struct usb_interface *interface,
                 udev->descriptor.idProduct, dev->serial_number,
                 (dev->minor - ADU_MINOR_BASE));
 exit:
-       dbg(2," %s : leave, return value %p (dev)", __func__, dev);
+       dbg(2, " %s : leave, return value %p (dev)", __func__, dev);
 
        return retval;
 
@@ -851,7 +852,7 @@ static void adu_disconnect(struct usb_interface *interface)
        struct adu_device *dev;
        int minor;
 
-       dbg(2," %s : enter", __func__);
+       dbg(2, " %s : enter", __func__);
 
        dev = usb_get_intfdata(interface);
 
@@ -865,7 +866,7 @@ static void adu_disconnect(struct usb_interface *interface)
        usb_set_intfdata(interface, NULL);
 
        /* if the device is not opened, then we clean up right now */
-       dbg(2," %s : open count %d", __func__, dev->open_count);
+       dbg(2, " %s : open count %d", __func__, dev->open_count);
        if (!dev->open_count)
                adu_delete(dev);
 
@@ -874,7 +875,7 @@ static void adu_disconnect(struct usb_interface *interface)
        dev_info(&interface->dev, "ADU device adutux%d now disconnected\n",
                 (minor - ADU_MINOR_BASE));
 
-       dbg(2," %s : leave", __func__);
+       dbg(2, " %s : leave", __func__);
 }
 
 /* usb specific object needed to register this driver with the usb subsystem */
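The adutux hunks above are checkpatch-style cleanups rather than functional changes: a space after each comma in the dbg() calls, the pointer star moved next to the variable name, and assignments pulled out of if conditions. As a compact recap of the last of those patterns (taken from the open path above, slightly trimmed, not new driver code):

        /* before: assignment hidden inside the condition */
        if ((retval = mutex_lock_interruptible(&adutux_mutex)))
                goto exit_no_lock;

        /* after: assign first, then test the result */
        retval = mutex_lock_interruptible(&adutux_mutex);
        if (retval)
                goto exit_no_lock;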
index 411e605f448af32f07fd02034917aa61f245ea4c..a638c4e9a947b38a140602c78ee3807de9f90652 100644 (file)
@@ -208,7 +208,7 @@ sisusbcon_init(struct vc_data *c, int init)
        struct sisusb_usb_data *sisusb;
        int cols, rows;
 
-       /* This is called by take_over_console(),
+       /* This is called by do_take_over_console(),
         * ie by us/under our control. It is
         * only called after text mode and fonts
         * are set up/restored.
@@ -273,7 +273,7 @@ sisusbcon_deinit(struct vc_data *c)
        struct sisusb_usb_data *sisusb;
        int i;
 
-       /* This is called by take_over_console()
+       /* This is called by do_take_over_console()
         * and others, ie not under our control.
         */
 
@@ -1490,8 +1490,9 @@ sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last)
        mutex_unlock(&sisusb->lock);
 
        /* Now grab the desired console(s) */
-       ret = take_over_console(&sisusb_con, first - 1, last - 1, 0);
-
+       console_lock();
+       ret = do_take_over_console(&sisusb_con, first - 1, last - 1, 0);
+       console_unlock();
        if (!ret)
                sisusb->haveconsole = 1;
        else {
@@ -1535,11 +1536,14 @@ sisusb_console_exit(struct sisusb_usb_data *sisusb)
 
        if (sisusb->haveconsole) {
                for (i = 0; i < MAX_NR_CONSOLES; i++)
-                       if (sisusb->havethisconsole[i])
-                               take_over_console(&sisusb_dummy_con, i, i, 0);
+                       if (sisusb->havethisconsole[i]) {
+                               console_lock();
+                               do_take_over_console(&sisusb_dummy_con, i, i, 0);
+                               console_unlock();
                                /* At this point, con_deinit for all our
-                                * consoles is executed by take_over_console().
+                                * consoles is executed by do_take_over_console().
                                 */
+                       }
                sisusb->haveconsole = 0;
        }
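The sisusbcon hunks above switch from take_over_console() to do_take_over_console(), which does not take the console lock itself; the caller is now responsible for holding it around the call. A minimal sketch of the resulting calling pattern (error handling trimmed, names as in the driver):

        int ret;

        console_lock();
        ret = do_take_over_console(&sisusb_con, first - 1, last - 1, 0);
        console_unlock();
        if (ret)
                return ret;     /* could not grab the console(s) */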
 
index d3a1cce1bf9c511710993002d83391f8f6a6fb35..c3578393ddeffc427906f362daa8d0ca39576e10 100644 (file)
@@ -42,9 +42,6 @@
 #define USB3503_NRD            0x09
 
 #define USB3503_PDS            0x0a
-#define USB3503_PORT1          (1 << 1)
-#define USB3503_PORT2          (1 << 2)
-#define USB3503_PORT3          (1 << 3)
 
 #define USB3503_SP_ILOCK       0xe7
 #define USB3503_SPILOCK_CONNECT        (1 << 1)
@@ -56,6 +53,7 @@
 struct usb3503 {
        enum usb3503_mode       mode;
        struct i2c_client       *client;
+       u8      port_off_mask;
        int     gpio_intn;
        int     gpio_reset;
        int     gpio_connect;
@@ -107,11 +105,9 @@ static int usb3503_reset(int gpio_reset, int state)
        if (gpio_is_valid(gpio_reset))
                gpio_set_value(gpio_reset, state);
 
-       /* Wait RefClk when RESET_N is released, otherwise Hub will
-        * not transition to Hub Communication Stage.
-        */
+       /* Wait T_HUBINIT == 4ms for hub logic to stabilize */
        if (state)
-               msleep(100);
+               usleep_range(4000, 10000);
 
        return 0;
 }
@@ -134,12 +130,14 @@ static int usb3503_switch_mode(struct usb3503 *hub, enum usb3503_mode mode)
                        goto err_hubmode;
                }
 
-               /* PDS : Port2,3 Disable For Self Powered Operation */
-               err = usb3503_set_bits(i2c, USB3503_PDS,
-                               (USB3503_PORT2 | USB3503_PORT3));
-               if (err < 0) {
-                       dev_err(&i2c->dev, "PDS failed (%d)\n", err);
-                       goto err_hubmode;
+               /* PDS : Disable For Self Powered Operation */
+               if (hub->port_off_mask) {
+                       err = usb3503_set_bits(i2c, USB3503_PDS,
+                                       hub->port_off_mask);
+                       if (err < 0) {
+                               dev_err(&i2c->dev, "PDS failed (%d)\n", err);
+                               goto err_hubmode;
+                       }
                }
 
                /* CFG1 : SELF_BUS_PWR -> Self-Powerd operation */
@@ -186,6 +184,8 @@ static int usb3503_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        struct usb3503 *hub;
        int err = -ENOMEM;
        u32 mode = USB3503_MODE_UNKNOWN;
+       const u32 *property;
+       int len;
 
        hub = kzalloc(sizeof(struct usb3503), GFP_KERNEL);
        if (!hub) {
@@ -197,18 +197,31 @@ static int usb3503_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        hub->client = i2c;
 
        if (pdata) {
+               hub->port_off_mask      = pdata->port_off_mask;
                hub->gpio_intn          = pdata->gpio_intn;
                hub->gpio_connect       = pdata->gpio_connect;
                hub->gpio_reset         = pdata->gpio_reset;
                hub->mode               = pdata->initial_mode;
        } else if (np) {
+               hub->port_off_mask = 0;
+
+               property = of_get_property(np, "disabled-ports", &len);
+               if (property && (len / sizeof(u32)) > 0) {
+                       int i;
+                       for (i = 0; i < len / sizeof(u32); i++) {
+                               u32 port = be32_to_cpu(property[i]);
+                               if ((1 <= port) && (port <= 3))
+                                       hub->port_off_mask |= (1 << port);
+                       }
+               }
+
                hub->gpio_intn  = of_get_named_gpio(np, "connect-gpios", 0);
                if (hub->gpio_intn == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                hub->gpio_connect = of_get_named_gpio(np, "intn-gpios", 0);
                if (hub->gpio_connect == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
-               hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
+               hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
                if (hub->gpio_reset == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                of_property_read_u32(np, "initial-mode", &mode);
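The usb3503 change drops the fixed PORT2/PORT3 disable and instead builds a per-board port_off_mask, taken either from platform data or from a "disabled-ports" device-tree property, and only writes the PDS register when that mask is non-zero. A standalone sketch of the mask construction (hypothetical helper name, assuming the port numbers have already been converted from the big-endian property values):

#include <linux/types.h>

/* Build the PDS port-disable mask from 1-based port numbers 1..3,
 * mirroring the probe code above: port N maps to bit (1 << N). */
static u8 usb3503_ports_to_mask(const u32 *ports, int nports)
{
        u8 mask = 0;
        int i;

        for (i = 0; i < nports; i++)
                if (ports[i] >= 1 && ports[i] <= 3)
                        mask |= 1 << ports[i];

        return mask;
}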
index 06f8d29af1ef7a74dfb0d2c0ae61ea58e36e2a74..797e3fd455102b5365d4730db5659fce1f2ded43 100644 (file)
@@ -27,6 +27,35 @@ config USB_MUSB_HDRC
 
 if USB_MUSB_HDRC
 
+choice
+       bool "MUSB Mode Selection"
+       default USB_MUSB_DUAL_ROLE if (USB && USB_GADGET)
+       default USB_MUSB_HOST if (USB && !USB_GADGET)
+       default USB_MUSB_GADGET if (!USB && USB_GADGET)
+
+config USB_MUSB_HOST
+       bool "Host only mode"
+       depends on USB
+       help
+         Select this when you want to use MUSB in host mode only,
+         thereby the gadget feature will be regressed.
+
+config USB_MUSB_GADGET
+       bool "Gadget only mode"
+       depends on USB_GADGET
+       help
+         Select this when you want to use MUSB in gadget mode only,
+         thereby the host feature will be regressed.
+
+config USB_MUSB_DUAL_ROLE
+       bool "Dual Role mode"
+       depends on (USB && USB_GADGET)
+       help
+         This is the default mode of working of MUSB controller where
+         both host and gadget features are enabled.
+
+endchoice
+
 choice
        prompt "Platform Glue Layer"
 
index 3b858715b5eaa9808a7b1ac179e7396981943619..2b82ed7c85ca9baa0498d1dabd7a2aeca6b59e27 100644 (file)
@@ -6,8 +6,8 @@ obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
 
 musb_hdrc-y := musb_core.o
 
-musb_hdrc-y                                    += musb_gadget_ep0.o musb_gadget.o
-musb_hdrc-y                                    += musb_virthub.o musb_host.o
+musb_hdrc-$(CONFIG_USB_MUSB_HOST)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_virthub.o musb_host.o
+musb_hdrc-$(CONFIG_USB_MUSB_GADGET)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_gadget_ep0.o musb_gadget.o
 musb_hdrc-$(CONFIG_DEBUG_FS)                   += musb_debugfs.o
 
 # Hardware Glue Layer
index 5e63b160db0c10bd2087cbfcf21c611c5ce3a7af..6ba8439bd5a6e037d124a2fd5db152387fa3dd5a 100644 (file)
@@ -450,6 +450,7 @@ static u64 bfin_dmamask = DMA_BIT_MASK(32);
 
 static int bfin_probe(struct platform_device *pdev)
 {
+       struct resource musb_resources[2];
        struct musb_hdrc_platform_data  *pdata = pdev->dev.platform_data;
        struct platform_device          *musb;
        struct bfin_glue                *glue;
@@ -479,8 +480,21 @@ static int bfin_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, glue);
 
-       ret = platform_device_add_resources(musb, pdev->resource,
-                       pdev->num_resources);
+       memset(musb_resources, 0x00, sizeof(*musb_resources) *
+                       ARRAY_SIZE(musb_resources));
+
+       musb_resources[0].name = pdev->resource[0].name;
+       musb_resources[0].start = pdev->resource[0].start;
+       musb_resources[0].end = pdev->resource[0].end;
+       musb_resources[0].flags = pdev->resource[0].flags;
+
+       musb_resources[1].name = pdev->resource[1].name;
+       musb_resources[1].start = pdev->resource[1].start;
+       musb_resources[1].end = pdev->resource[1].end;
+       musb_resources[1].flags = pdev->resource[1].flags;
+
+       ret = platform_device_add_resources(musb, musb_resources,
+                       ARRAY_SIZE(musb_resources));
        if (ret) {
                dev_err(&pdev->dev, "failed to add resources\n");
                goto err3;
index b903b744a2249d59b1a2aa681dfb2b3b70b6b5c4..0da6f648a9fe1393faab16726bcc8d8725eb316c 100644 (file)
@@ -476,6 +476,7 @@ static u64 da8xx_dmamask = DMA_BIT_MASK(32);
 
 static int da8xx_probe(struct platform_device *pdev)
 {
+       struct resource musb_resources[2];
        struct musb_hdrc_platform_data  *pdata = pdev->dev.platform_data;
        struct platform_device          *musb;
        struct da8xx_glue               *glue;
@@ -521,8 +522,21 @@ static int da8xx_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, glue);
 
-       ret = platform_device_add_resources(musb, pdev->resource,
-                       pdev->num_resources);
+       memset(musb_resources, 0x00, sizeof(*musb_resources) *
+                       ARRAY_SIZE(musb_resources));
+
+       musb_resources[0].name = pdev->resource[0].name;
+       musb_resources[0].start = pdev->resource[0].start;
+       musb_resources[0].end = pdev->resource[0].end;
+       musb_resources[0].flags = pdev->resource[0].flags;
+
+       musb_resources[1].name = pdev->resource[1].name;
+       musb_resources[1].start = pdev->resource[1].start;
+       musb_resources[1].end = pdev->resource[1].end;
+       musb_resources[1].flags = pdev->resource[1].flags;
+
+       ret = platform_device_add_resources(musb, musb_resources,
+                       ARRAY_SIZE(musb_resources));
        if (ret) {
                dev_err(&pdev->dev, "failed to add resources\n");
                goto err5;
index bea6cc35471c5f73bd5e26607e06d2087df08fe1..f8aeaf2e2cd1f88ba2526861b2b932b6d6081050 100644 (file)
@@ -509,6 +509,7 @@ static u64 davinci_dmamask = DMA_BIT_MASK(32);
 
 static int davinci_probe(struct platform_device *pdev)
 {
+       struct resource musb_resources[2];
        struct musb_hdrc_platform_data  *pdata = pdev->dev.platform_data;
        struct platform_device          *musb;
        struct davinci_glue             *glue;
@@ -553,8 +554,21 @@ static int davinci_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, glue);
 
-       ret = platform_device_add_resources(musb, pdev->resource,
-                       pdev->num_resources);
+       memset(musb_resources, 0x00, sizeof(*musb_resources) *
+                       ARRAY_SIZE(musb_resources));
+
+       musb_resources[0].name = pdev->resource[0].name;
+       musb_resources[0].start = pdev->resource[0].start;
+       musb_resources[0].end = pdev->resource[0].end;
+       musb_resources[0].flags = pdev->resource[0].flags;
+
+       musb_resources[1].name = pdev->resource[1].name;
+       musb_resources[1].start = pdev->resource[1].start;
+       musb_resources[1].end = pdev->resource[1].end;
+       musb_resources[1].flags = pdev->resource[1].flags;
+
+       ret = platform_device_add_resources(musb, musb_resources,
+                       ARRAY_SIZE(musb_resources));
        if (ret) {
                dev_err(&pdev->dev, "failed to add resources\n");
                goto err5;
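The bfin, da8xx and davinci glue drivers stop handing the parent's pdev->resource array straight to the musb child device and instead pass a two-entry copy of the first two resources (typically the MUSB register space and its interrupt). The three hunks repeat the same field-by-field copy; an equivalent loop-based helper, not part of the patch and with a hypothetical name, would look like:

#include <linux/ioport.h>
#include <linux/string.h>

/* Copy the fields the musb core cares about, clearing everything else. */
static void musb_copy_resources(struct resource *dst,
                                const struct resource *src, int n)
{
        int i;

        memset(dst, 0, sizeof(*dst) * n);
        for (i = 0; i < n; i++) {
                dst[i].name  = src[i].name;
                dst[i].start = src[i].start;
                dst[i].end   = src[i].end;
                dst[i].flags = src[i].flags;
        }
}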
index 37a261a6bb6aa1d75d8e9a0b27e787d0cffeff41..29a24ced67483d8942f943013ee27a3645902112 100644 (file)
@@ -380,7 +380,6 @@ static void musb_otg_timer_func(unsigned long data)
                dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
                        usb_otg_state_string(musb->xceiv->state));
        }
-       musb->ignore_disconnect = 0;
        spin_unlock_irqrestore(&musb->lock, flags);
 }
 
@@ -389,7 +388,7 @@ static void musb_otg_timer_func(unsigned long data)
  */
 void musb_hnp_stop(struct musb *musb)
 {
-       struct usb_hcd  *hcd = musb_to_hcd(musb);
+       struct usb_hcd  *hcd = musb->hcd;
        void __iomem    *mbase = musb->mregs;
        u8      reg;
 
@@ -404,7 +403,8 @@ void musb_hnp_stop(struct musb *musb)
                break;
        case OTG_STATE_B_HOST:
                dev_dbg(musb->controller, "HNP: Disabling HR\n");
-               hcd->self.is_b_host = 0;
+               if (hcd)
+                       hcd->self.is_b_host = 0;
                musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
                MUSB_DEV_MODE(musb);
                reg = musb_readb(mbase, MUSB_POWER);
@@ -484,7 +484,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 
                                musb->xceiv->state = OTG_STATE_A_HOST;
                                musb->is_active = 1;
-                               usb_hcd_resume_root_hub(musb_to_hcd(musb));
+                               musb_host_resume_root_hub(musb);
                                break;
                        case OTG_STATE_B_WAIT_ACON:
                                musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
@@ -501,7 +501,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                        case OTG_STATE_A_SUSPEND:
                                /* possibly DISCONNECT is upcoming */
                                musb->xceiv->state = OTG_STATE_A_HOST;
-                               usb_hcd_resume_root_hub(musb_to_hcd(musb));
+                               musb_host_resume_root_hub(musb);
                                break;
                        case OTG_STATE_B_WAIT_ACON:
                        case OTG_STATE_B_PERIPHERAL:
@@ -643,7 +643,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                         * undesired detour through A_WAIT_BCON.
                         */
                        musb_hnp_stop(musb);
-                       usb_hcd_resume_root_hub(musb_to_hcd(musb));
+                       musb_host_resume_root_hub(musb);
                        musb_root_disconnect(musb);
                        musb_platform_try_idle(musb, jiffies
                                        + msecs_to_jiffies(musb->a_wait_bcon
@@ -685,7 +685,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
        }
 
        if (int_usb & MUSB_INTR_CONNECT) {
-               struct usb_hcd *hcd = musb_to_hcd(musb);
+               struct usb_hcd *hcd = musb->hcd;
 
                handled = IRQ_HANDLED;
                musb->is_active = 1;
@@ -726,31 +726,27 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                        dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
 b_host:
                        musb->xceiv->state = OTG_STATE_B_HOST;
-                       hcd->self.is_b_host = 1;
-                       musb->ignore_disconnect = 0;
+                       if (musb->hcd)
+                               musb->hcd->self.is_b_host = 1;
                        del_timer(&musb->otg_timer);
                        break;
                default:
                        if ((devctl & MUSB_DEVCTL_VBUS)
                                        == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
                                musb->xceiv->state = OTG_STATE_A_HOST;
-                               hcd->self.is_b_host = 0;
+                               if (hcd)
+                                       hcd->self.is_b_host = 0;
                        }
                        break;
                }
 
-               /* poke the root hub */
-               MUSB_HST_MODE(musb);
-               if (hcd->status_urb)
-                       usb_hcd_poll_rh_status(hcd);
-               else
-                       usb_hcd_resume_root_hub(hcd);
+               musb_host_poke_root_hub(musb);
 
                dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
                                usb_otg_state_string(musb->xceiv->state), devctl);
        }
 
-       if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
+       if (int_usb & MUSB_INTR_DISCONNECT) {
                dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
                                usb_otg_state_string(musb->xceiv->state),
                                MUSB_MODE(musb), devctl);
@@ -759,7 +755,7 @@ b_host:
                switch (musb->xceiv->state) {
                case OTG_STATE_A_HOST:
                case OTG_STATE_A_SUSPEND:
-                       usb_hcd_resume_root_hub(musb_to_hcd(musb));
+                       musb_host_resume_root_hub(musb);
                        musb_root_disconnect(musb);
                        if (musb->a_wait_bcon != 0)
                                musb_platform_try_idle(musb, jiffies
@@ -772,7 +768,8 @@ b_host:
                         * in hnp_stop() is currently not used...
                         */
                        musb_root_disconnect(musb);
-                       musb_to_hcd(musb)->self.is_b_host = 0;
+                       if (musb->hcd)
+                               musb->hcd->self.is_b_host = 0;
                        musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
                        MUSB_DEV_MODE(musb);
                        musb_g_disconnect(musb);
@@ -818,11 +815,6 @@ b_host:
                                usb_otg_state_string(musb->xceiv->state));
                        switch (musb->xceiv->state) {
                        case OTG_STATE_A_SUSPEND:
-                               /* We need to ignore disconnect on suspend
-                                * otherwise tusb 2.0 won't reconnect after a
-                                * power cycle, which breaks otg compliance.
-                                */
-                               musb->ignore_disconnect = 1;
                                musb_g_reset(musb);
                                /* FALLTHROUGH */
                        case OTG_STATE_A_WAIT_BCON:     /* OPT TD.4.7-900ms */
@@ -834,7 +826,6 @@ b_host:
                                        + msecs_to_jiffies(TA_WAIT_BCON(musb)));
                                break;
                        case OTG_STATE_A_PERIPHERAL:
-                               musb->ignore_disconnect = 0;
                                del_timer(&musb->otg_timer);
                                musb_g_reset(musb);
                                break;
@@ -909,51 +900,6 @@ b_host:
 
 /*-------------------------------------------------------------------------*/
 
-/*
-* Program the HDRC to start (enable interrupts, dma, etc.).
-*/
-void musb_start(struct musb *musb)
-{
-       void __iomem    *regs = musb->mregs;
-       u8              devctl = musb_readb(regs, MUSB_DEVCTL);
-
-       dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
-
-       /*  Set INT enable registers, enable interrupts */
-       musb->intrtxe = musb->epmask;
-       musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
-       musb->intrrxe = musb->epmask & 0xfffe;
-       musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
-       musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
-
-       musb_writeb(regs, MUSB_TESTMODE, 0);
-
-       /* put into basic highspeed mode and start session */
-       musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
-                                               | MUSB_POWER_HSENAB
-                                               /* ENSUSPEND wedges tusb */
-                                               /* | MUSB_POWER_ENSUSPEND */
-                                               );
-
-       musb->is_active = 0;
-       devctl = musb_readb(regs, MUSB_DEVCTL);
-       devctl &= ~MUSB_DEVCTL_SESSION;
-
-       /* session started after:
-        * (a) ID-grounded irq, host mode;
-        * (b) vbus present/connect IRQ, peripheral mode;
-        * (c) peripheral initiates, using SRP
-        */
-       if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
-               musb->is_active = 1;
-       else
-               devctl |= MUSB_DEVCTL_SESSION;
-
-       musb_platform_enable(musb);
-       musb_writeb(regs, MUSB_DEVCTL, devctl);
-}
-
-
 static void musb_generic_disable(struct musb *musb)
 {
        void __iomem    *mbase = musb->mregs;
@@ -1007,6 +953,7 @@ static void musb_shutdown(struct platform_device *pdev)
 
        pm_runtime_get_sync(musb->controller);
 
+       musb_host_cleanup(musb);
        musb_gadget_cleanup(musb);
 
        spin_lock_irqsave(&musb->lock, flags);
@@ -1763,24 +1710,18 @@ static struct musb *allocate_instance(struct device *dev,
        struct musb             *musb;
        struct musb_hw_ep       *ep;
        int                     epnum;
-       struct usb_hcd  *hcd;
+       int                     ret;
 
-       hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
-       if (!hcd)
+       musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
+       if (!musb)
                return NULL;
-       /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
 
-       musb = hcd_to_musb(hcd);
        INIT_LIST_HEAD(&musb->control);
        INIT_LIST_HEAD(&musb->in_bulk);
        INIT_LIST_HEAD(&musb->out_bulk);
 
-       hcd->uses_new_polling = 1;
-       hcd->has_tt = 1;
-
        musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
        musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
-       dev_set_drvdata(dev, musb);
        musb->mregs = mbase;
        musb->ctrl_base = mbase;
        musb->nIrq = -ENODEV;
@@ -1795,7 +1736,16 @@ static struct musb *allocate_instance(struct device *dev,
 
        musb->controller = dev;
 
+       ret = musb_host_alloc(musb);
+       if (ret < 0)
+               goto err_free;
+
+       dev_set_drvdata(dev, musb);
+
        return musb;
+
+err_free:
+       return NULL;
 }
 
 static void musb_free(struct musb *musb)
@@ -1821,7 +1771,7 @@ static void musb_free(struct musb *musb)
                dma_controller_destroy(c);
        }
 
-       usb_put_hcd(musb_to_hcd(musb));
+       musb_host_free(musb);
 }
 
 /*
@@ -1838,7 +1788,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        int                     status;
        struct musb             *musb;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
-       struct usb_hcd          *hcd;
 
        /* The driver might handle more features than the board; OK.
         * Fail when the board needs a feature that's not enabled.
@@ -1864,6 +1813,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        musb->board_set_power = plat->set_power;
        musb->min_power = plat->min_power;
        musb->ops = plat->platform_ops;
+       musb->port_mode = plat->mode;
 
        /* The musb_platform_init() call:
         *   - adjusts musb->mregs
@@ -1939,13 +1889,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
                musb->irq_wake = 0;
        }
 
-       /* host side needs more setup */
-       hcd = musb_to_hcd(musb);
-       otg_set_host(musb->xceiv->otg, &hcd->self);
-       hcd->self.otg_port = 1;
-       musb->xceiv->otg->host = &hcd->self;
-       hcd->power_budget = 2 * (plat->power ? : 250);
-
        /* program PHY to use external vBus if required */
        if (plat->extvbus) {
                u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
@@ -1961,7 +1904,23 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
                musb->xceiv->state = OTG_STATE_B_IDLE;
        }
 
-       status = musb_gadget_setup(musb);
+       switch (musb->port_mode) {
+       case MUSB_PORT_MODE_HOST:
+               status = musb_host_setup(musb, plat->power);
+               break;
+       case MUSB_PORT_MODE_GADGET:
+               status = musb_gadget_setup(musb);
+               break;
+       case MUSB_PORT_MODE_DUAL_ROLE:
+               status = musb_host_setup(musb, plat->power);
+               if (status < 0)
+                       goto fail3;
+               status = musb_gadget_setup(musb);
+               break;
+       default:
+               dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
+               break;
+       }
 
        if (status < 0)
                goto fail3;
index 7fb4819a6f115f54d097ef09eb131fb820994d05..7d341c387eab94e4148cba0e8db0e2bf25130811 100644 (file)
@@ -77,28 +77,17 @@ struct musb_ep;
 #define is_peripheral_active(m)                (!(m)->is_host)
 #define is_host_active(m)              ((m)->is_host)
 
+enum {
+       MUSB_PORT_MODE_HOST     = 1,
+       MUSB_PORT_MODE_GADGET,
+       MUSB_PORT_MODE_DUAL_ROLE,
+};
+
 #ifdef CONFIG_PROC_FS
 #include <linux/fs.h>
 #define MUSB_CONFIG_PROC_FS
 #endif
 
-/****************************** PERIPHERAL ROLE *****************************/
-
-extern irqreturn_t musb_g_ep0_irq(struct musb *);
-extern void musb_g_tx(struct musb *, u8);
-extern void musb_g_rx(struct musb *, u8);
-extern void musb_g_reset(struct musb *);
-extern void musb_g_suspend(struct musb *);
-extern void musb_g_resume(struct musb *);
-extern void musb_g_wakeup(struct musb *);
-extern void musb_g_disconnect(struct musb *);
-
-/****************************** HOST ROLE ***********************************/
-
-extern irqreturn_t musb_h_ep0_irq(struct musb *);
-extern void musb_host_tx(struct musb *, u8);
-extern void musb_host_rx(struct musb *, u8);
-
 /****************************** CONSTANTS ********************************/
 
 #ifndef MUSB_C_NUM_EPS
@@ -373,6 +362,7 @@ struct musb {
 
        u8                      min_power;      /* vbus for periph, in mA/2 */
 
+       int                     port_mode;      /* MUSB_PORT_MODE_* */
        bool                    is_host;
 
        int                     a_wait_bcon;    /* VBUS timeout in msecs */
@@ -382,7 +372,6 @@ struct musb {
        unsigned                is_active:1;
 
        unsigned is_multipoint:1;
-       unsigned ignore_disconnect:1;   /* during bus resets */
 
        unsigned                hb_iso_rx:1;    /* high bandwidth iso rx? */
        unsigned                hb_iso_tx:1;    /* high bandwidth iso tx? */
@@ -419,6 +408,7 @@ struct musb {
        enum musb_g_ep0_state   ep0_state;
        struct usb_gadget       g;                      /* the gadget */
        struct usb_gadget_driver *gadget_driver;        /* its driver */
+       struct usb_hcd          *hcd;                   /* the usb hcd */
 
        /*
         * FIXME: Remove this flag.
@@ -520,7 +510,6 @@ static inline void musb_configure_ep0(struct musb *musb)
 
 extern const char musb_driver_name[];
 
-extern void musb_start(struct musb *musb);
 extern void musb_stop(struct musb *musb);
 
 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
index ba7092349fa95507a24ac3dedb7c0256f93ab269..0414bc19d0095258ab5e646858ce342a740f92c8 100644 (file)
@@ -1820,7 +1820,6 @@ static int musb_gadget_start(struct usb_gadget *g,
 {
        struct musb             *musb = gadget_to_musb(g);
        struct usb_otg          *otg = musb->xceiv->otg;
-       struct usb_hcd          *hcd = musb_to_hcd(musb);
        unsigned long           flags;
        int                     retval = 0;
 
@@ -1847,17 +1846,9 @@ static int musb_gadget_start(struct usb_gadget *g,
         * handles power budgeting ... this way also
         * ensures HdrcStart is indirectly called.
         */
-       retval = usb_add_hcd(hcd, 0, 0);
-       if (retval < 0) {
-               dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
-               goto err;
-       }
-
        if (musb->xceiv->last_event == USB_EVENT_ID)
                musb_platform_set_vbus(musb, 1);
 
-       hcd->self.uses_pio_for_control = 1;
-
        if (musb->xceiv->last_event == USB_EVENT_NONE)
                pm_runtime_put(musb->controller);
 
@@ -1942,7 +1933,6 @@ static int musb_gadget_stop(struct usb_gadget *g,
        musb_platform_try_idle(musb, 0);
        spin_unlock_irqrestore(&musb->lock, flags);
 
-       usb_remove_hcd(musb_to_hcd(musb));
        /*
         * FIXME we need to be able to register another
         * gadget driver here and have everything work;
index 66b7c5e0fb44541ed9bffff534fb38f0c80d41a9..0314dfc770c7e55a7483637ee07e1dcf520b7b8d 100644 (file)
 
 #include <linux/list.h>
 
+#if IS_ENABLED(CONFIG_USB_MUSB_GADGET) || IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
+extern irqreturn_t musb_g_ep0_irq(struct musb *);
+extern void musb_g_tx(struct musb *, u8);
+extern void musb_g_rx(struct musb *, u8);
+extern void musb_g_reset(struct musb *);
+extern void musb_g_suspend(struct musb *);
+extern void musb_g_resume(struct musb *);
+extern void musb_g_wakeup(struct musb *);
+extern void musb_g_disconnect(struct musb *);
+extern void musb_gadget_cleanup(struct musb *);
+extern int musb_gadget_setup(struct musb *);
+
+#else
+static inline irqreturn_t musb_g_ep0_irq(struct musb *musb)
+{
+       return 0;
+}
+
+static inline void musb_g_tx(struct musb *musb, u8 epnum)      {}
+static inline void musb_g_rx(struct musb *musb, u8 epnum)      {}
+static inline void musb_g_reset(struct musb *musb)             {}
+static inline void musb_g_suspend(struct musb *musb)           {}
+static inline void musb_g_resume(struct musb *musb)            {}
+static inline void musb_g_wakeup(struct musb *musb)            {}
+static inline void musb_g_disconnect(struct musb *musb)                {}
+static inline void musb_gadget_cleanup(struct musb *musb)      {}
+static inline int musb_gadget_setup(struct musb *musb)
+{
+       return 0;
+}
+#endif
+
 enum buffer_map_state {
        UN_MAPPED = 0,
        PRE_MAPPED,
@@ -106,14 +138,8 @@ static inline struct musb_request *next_request(struct musb_ep *ep)
        return container_of(queue->next, struct musb_request, list);
 }
 
-extern void musb_g_tx(struct musb *musb, u8 epnum);
-extern void musb_g_rx(struct musb *musb, u8 epnum);
-
 extern const struct usb_ep_ops musb_g_ep0_ops;
 
-extern int musb_gadget_setup(struct musb *);
-extern void musb_gadget_cleanup(struct musb *);
-
 extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
 
 extern void musb_ep_restart(struct musb *, struct musb_request *);
index 9d3044bdebe54c68ac7d506a2fa593f59287c4c0..a9695f5a92fb9ed46d8f87f45c4b05e3768f2b5b 100644 (file)
@@ -46,7 +46,6 @@
 #include "musb_core.h"
 #include "musb_host.h"
 
-
 /* MUSB HOST status 22-mar-2006
  *
  * - There's still lots of partial code duplication for fault paths, so
  * of transfers between endpoints, or anything clever.
  */
 
+struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+       return *(struct musb **) hcd->hcd_priv;
+}
+
 
 static void musb_ep_program(struct musb *musb, u8 epnum,
                        struct urb *urb, int is_out,
@@ -269,8 +273,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
                /* FIXME this doesn't implement that scheduling policy ...
                 * or handle framecounter wrapping
                 */
-               if ((urb->transfer_flags & URB_ISO_ASAP)
-                               || (frame >= urb->start_frame)) {
+               if (1) {        /* Always assume URB_ISO_ASAP */
                        /* REVISIT the SOF irq handler shouldn't duplicate
                         * this code; and we don't init urb->start_frame...
                         */
@@ -311,9 +314,9 @@ __acquires(musb->lock)
                        urb->actual_length, urb->transfer_buffer_length
                        );
 
-       usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+       usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
        spin_unlock(&musb->lock);
-       usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+       usb_hcd_giveback_urb(musb->hcd, urb, status);
        spin_lock(&musb->lock);
 }
 
@@ -625,7 +628,7 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
        u16                     csr;
        u8                      mode;
 
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
        if (length > channel->max_len)
                length = channel->max_len;
 
@@ -1455,7 +1458,7 @@ done:
        if (length > qh->maxpacket)
                length = qh->maxpacket;
        /* Unmap the buffer so that CPU can use it */
-       usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
+       usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
 
        /*
         * We need to map sg if the transfer_buffer is
@@ -1657,7 +1660,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 
        /* FIXME this is _way_ too much in-line logic for Mentor DMA */
 
-#ifndef CONFIG_USB_INVENTRA_DMA
+#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
        if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
                /* REVISIT this happened for a while on some short reads...
                 * the cleanup still needs investigation... looks bad...
@@ -1689,7 +1692,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                        | MUSB_RXCSR_RXPKTRDY);
                musb_writew(hw_ep->regs, MUSB_RXCSR, val);
 
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
                if (usb_pipeisoc(pipe)) {
                        struct usb_iso_packet_descriptor *d;
 
@@ -1745,7 +1748,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                }
 
                /* we are expecting IN packets */
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
                if (dma) {
                        struct dma_controller   *c;
                        u16                     rx_count;
@@ -1754,10 +1757,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 
                        rx_count = musb_readw(epio, MUSB_RXCOUNT);
 
-                       dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
+                       dev_dbg(musb->controller, "RX%d count %d, buffer 0x%llx len %d/%d\n",
                                        epnum, rx_count,
-                                       urb->transfer_dma
-                                               + urb->actual_length,
+                                       (unsigned long long) urb->transfer_dma
+                                       + urb->actual_length,
                                        qh->offset,
                                        urb->transfer_buffer_length);
 
@@ -1869,7 +1872,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                        unsigned int received_len;
 
                        /* Unmap the buffer so that CPU can use it */
-                       usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
+                       usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
 
                        /*
                         * We need to map sg if the transfer_buffer is
@@ -2463,7 +2466,6 @@ static int musb_bus_resume(struct usb_hcd *hcd)
        return 0;
 }
 
-
 #ifndef CONFIG_MUSB_PIO_ONLY
 
 #define MUSB_USB_DMA_ALIGN 4
@@ -2575,10 +2577,10 @@ static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
 }
 #endif /* !CONFIG_MUSB_PIO_ONLY */
 
-const struct hc_driver musb_hc_driver = {
+static const struct hc_driver musb_hc_driver = {
        .description            = "musb-hcd",
        .product_desc           = "MUSB HDRC host driver",
-       .hcd_priv_size          = sizeof(struct musb),
+       .hcd_priv_size          = sizeof(struct musb *),
        .flags                  = HCD_USB2 | HCD_MEMORY,
 
        /* not using irq handler or reset hooks from usbcore, since
@@ -2606,3 +2608,66 @@ const struct hc_driver musb_hc_driver = {
        /* .start_port_reset    = NULL, */
        /* .hub_irq_enable      = NULL, */
 };
+
+int musb_host_alloc(struct musb *musb)
+{
+       struct device   *dev = musb->controller;
+
+       /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+       musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
+       if (!musb->hcd)
+               return -EINVAL;
+
+       *musb->hcd->hcd_priv = (unsigned long) musb;
+       musb->hcd->self.uses_pio_for_control = 1;
+       musb->hcd->uses_new_polling = 1;
+       musb->hcd->has_tt = 1;
+
+       return 0;
+}
+
+void musb_host_cleanup(struct musb *musb)
+{
+       usb_remove_hcd(musb->hcd);
+       musb->hcd = NULL;
+}
+
+void musb_host_free(struct musb *musb)
+{
+       usb_put_hcd(musb->hcd);
+}
+
+int musb_host_setup(struct musb *musb, int power_budget)
+{
+       int ret;
+       struct usb_hcd *hcd = musb->hcd;
+
+       MUSB_HST_MODE(musb);
+       musb->xceiv->otg->default_a = 1;
+       musb->xceiv->state = OTG_STATE_A_IDLE;
+
+       otg_set_host(musb->xceiv->otg, &hcd->self);
+       hcd->self.otg_port = 1;
+       musb->xceiv->otg->host = &hcd->self;
+       hcd->power_budget = 2 * (power_budget ? : 250);
+
+       ret = usb_add_hcd(hcd, 0, 0);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+void musb_host_resume_root_hub(struct musb *musb)
+{
+       usb_hcd_resume_root_hub(musb->hcd);
+}
+
+void musb_host_poke_root_hub(struct musb *musb)
+{
+       MUSB_HST_MODE(musb);
+       if (musb->hcd->status_urb)
+               usb_hcd_poll_rh_status(musb->hcd);
+       else
+               usb_hcd_resume_root_hub(musb->hcd);
+}
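After this refactoring struct musb is allocated on its own and only a pointer to it lives in the hcd's private area: hcd_priv_size shrinks to sizeof(struct musb *), hcd_to_musb() dereferences the stored pointer, and common code reaches the host side through the new musb_host_*() wrappers, which musb_host.h turns into empty stubs for gadget-only builds. A minimal sketch of a converted call site (hypothetical function, mirroring the musb_core.c changes above):

/* The wrapper hides the hcd pointer; in a gadget-only configuration the
 * stub from musb_host.h makes this a no-op. */
static void example_handle_resume(struct musb *musb)
{
        /* was: usb_hcd_resume_root_hub(musb_to_hcd(musb)); */
        musb_host_resume_root_hub(musb);
}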
index 738f7eb60df96d8aa78859720940af81c007cd3b..960d73570b2f55c93bdb77fcf72187772a404aa3 100644 (file)
 
 #include <linux/scatterlist.h>
 
-static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
-{
-       return container_of((void *) musb, struct usb_hcd, hcd_priv);
-}
-
-static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
-{
-       return (struct musb *) (hcd->hcd_priv);
-}
-
 /* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
 struct musb_qh {
        struct usb_host_endpoint *hep;          /* usbcore info */
@@ -86,7 +76,52 @@ static inline struct musb_qh *first_qh(struct list_head *q)
 }
 
 
+#if IS_ENABLED(CONFIG_USB_MUSB_HOST) || IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
+extern struct musb *hcd_to_musb(struct usb_hcd *);
+extern irqreturn_t musb_h_ep0_irq(struct musb *);
+extern int musb_host_alloc(struct musb *);
+extern int musb_host_setup(struct musb *, int);
+extern void musb_host_cleanup(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
+extern void musb_root_disconnect(struct musb *musb);
+extern void musb_host_free(struct musb *);
+extern void musb_host_cleanup(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
 extern void musb_root_disconnect(struct musb *musb);
+extern void musb_host_resume_root_hub(struct musb *musb);
+extern void musb_host_poke_root_hub(struct musb *musb);
+#else
+static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+       return NULL;
+}
+
+static inline irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+       return 0;
+}
+
+static inline int musb_host_alloc(struct musb *musb)
+{
+       return 0;
+}
+
+static inline int musb_host_setup(struct musb *musb, int power_budget)
+{
+       return 0;
+}
+
+static inline void musb_host_cleanup(struct musb *musb)                {}
+static inline void musb_host_free(struct musb *musb)           {}
+static inline void musb_host_tx(struct musb *musb, u8 epnum)   {}
+static inline void musb_host_rx(struct musb *musb, u8 epnum)   {}
+static inline void musb_root_disconnect(struct musb *musb)     {}
+static inline void musb_host_resume_root_hub(struct musb *musb)        {}
+static inline void musb_host_poll_rh_status(struct musb *musb) {}
+static inline void musb_host_poke_root_hub(struct musb *musb)  {}
+#endif
 
 struct usb_hcd;
 
@@ -95,8 +130,6 @@ extern int musb_hub_control(struct usb_hcd *hcd,
                        u16 typeReq, u16 wValue, u16 wIndex,
                        char *buf, u16 wLength);
 
-extern const struct hc_driver musb_hc_driver;
-
 static inline struct urb *next_urb(struct musb_qh *qh)
 {
        struct list_head        *queue;
index ef7d11045f561cb69db73d57ee5a42777fd8a91e..a523950c2b32e66e07f7c8bd78daa181f691f25c 100644 (file)
 
 #include "musb_core.h"
 
+/*
+* Program the HDRC to start (enable interrupts, dma, etc.).
+*/
+static void musb_start(struct musb *musb)
+{
+       void __iomem    *regs = musb->mregs;
+       u8              devctl = musb_readb(regs, MUSB_DEVCTL);
+
+       dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
+
+       /*  Set INT enable registers, enable interrupts */
+       musb->intrtxe = musb->epmask;
+       musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
+       musb->intrrxe = musb->epmask & 0xfffe;
+       musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
+       musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+       musb_writeb(regs, MUSB_TESTMODE, 0);
+
+       /* put into basic highspeed mode and start session */
+       musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+                                               | MUSB_POWER_HSENAB
+                                               /* ENSUSPEND wedges tusb */
+                                               /* | MUSB_POWER_ENSUSPEND */
+                                               );
+
+       musb->is_active = 0;
+       devctl = musb_readb(regs, MUSB_DEVCTL);
+       devctl &= ~MUSB_DEVCTL_SESSION;
+
+       /* session started after:
+        * (a) ID-grounded irq, host mode;
+        * (b) vbus present/connect IRQ, peripheral mode;
+        * (c) peripheral initiates, using SRP
+        */
+       if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+           (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
+               musb->is_active = 1;
+       } else {
+               devctl |= MUSB_DEVCTL_SESSION;
+       }
+
+       musb_platform_enable(musb);
+       musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
 
 static void musb_port_suspend(struct musb *musb, bool do_suspend)
 {
@@ -145,7 +190,6 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
                        msleep(1);
                }
 
-               musb->ignore_disconnect = true;
                power &= 0xf0;
                musb_writeb(mbase, MUSB_POWER,
                                power | MUSB_POWER_RESET);
@@ -158,8 +202,6 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
                musb_writeb(mbase, MUSB_POWER,
                                power & ~MUSB_POWER_RESET);
 
-               musb->ignore_disconnect = false;
-
                power = musb_readb(mbase, MUSB_POWER);
                if (power & MUSB_POWER_HSMODE) {
                        dev_dbg(musb->controller, "high-speed device connected\n");
@@ -170,7 +212,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
                musb->port1_status |= USB_PORT_STAT_ENABLE
                                        | (USB_PORT_STAT_C_RESET << 16)
                                        | (USB_PORT_STAT_C_ENABLE << 16);
-               usb_hcd_poll_rh_status(musb_to_hcd(musb));
+               usb_hcd_poll_rh_status(musb->hcd);
 
                musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
        }
@@ -183,7 +225,7 @@ void musb_root_disconnect(struct musb *musb)
        musb->port1_status = USB_PORT_STAT_POWER
                        | (USB_PORT_STAT_C_CONNECTION << 16);
 
-       usb_hcd_poll_rh_status(musb_to_hcd(musb));
+       usb_hcd_poll_rh_status(musb->hcd);
        musb->is_active = 0;
 
        switch (musb->xceiv->state) {
@@ -337,7 +379,7 @@ int musb_hub_control(
                        musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
                                        | MUSB_PORT_STAT_RESUME);
                        musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
-                       usb_hcd_poll_rh_status(musb_to_hcd(musb));
+                       usb_hcd_poll_rh_status(musb->hcd);
                        /* NOTE: it might really be A_WAIT_BCON ... */
                        musb->xceiv->state = OTG_STATE_A_HOST;
                }
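
Editor's note: every musb_to_hcd() call in this hunk becomes a plain musb->hcd dereference. Once struct musb is no longer embedded in hcd->hcd_priv, the container_of() trick removed from musb_host.h above cannot work, so the hcd pointer has to be cached in struct musb (presumably filled in by the new musb_host_alloc()). Schematically:

	/* before: struct musb lived inside hcd->hcd_priv, so it could be converted back */
	hcd = container_of((void *)musb, struct usb_hcd, hcd_priv);

	/* after: struct musb is allocated separately and simply remembers its hcd */
	hcd = musb->hcd;
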
index 628b93fe5cccf585d21759fa112086a345a2e92a..4315d351fc7dbb4e026a0dce65dd87b761f55561 100644 (file)
@@ -87,7 +87,7 @@ static void musb_do_idle(unsigned long _musb)
                        musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
                                                | MUSB_PORT_STAT_RESUME);
                        musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
-                       usb_hcd_poll_rh_status(musb_to_hcd(musb));
+                       usb_hcd_poll_rh_status(musb->hcd);
                        /* NOTE: it might really be A_WAIT_BCON ... */
                        musb->xceiv->state = OTG_STATE_A_HOST;
                }
@@ -481,6 +481,7 @@ static u64 omap2430_dmamask = DMA_BIT_MASK(32);
 
 static int omap2430_probe(struct platform_device *pdev)
 {
+       struct resource                 musb_resources[2];
        struct musb_hdrc_platform_data  *pdata = pdev->dev.platform_data;
        struct omap_musb_board_data     *data;
        struct platform_device          *musb;
@@ -567,8 +568,21 @@ static int omap2430_probe(struct platform_device *pdev)
 
        INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work);
 
-       ret = platform_device_add_resources(musb, pdev->resource,
-                       pdev->num_resources);
+       memset(musb_resources, 0x00, sizeof(*musb_resources) *
+                       ARRAY_SIZE(musb_resources));
+
+       musb_resources[0].name = pdev->resource[0].name;
+       musb_resources[0].start = pdev->resource[0].start;
+       musb_resources[0].end = pdev->resource[0].end;
+       musb_resources[0].flags = pdev->resource[0].flags;
+
+       musb_resources[1].name = pdev->resource[1].name;
+       musb_resources[1].start = pdev->resource[1].start;
+       musb_resources[1].end = pdev->resource[1].end;
+       musb_resources[1].flags = pdev->resource[1].flags;
+
+       ret = platform_device_add_resources(musb, musb_resources,
+                       ARRAY_SIZE(musb_resources));
        if (ret) {
                dev_err(&pdev->dev, "failed to add resources\n");
                goto err2;
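
Editor's note: the local musb_resources[] array can safely live on the probe stack because platform_device_add_resources() duplicates the table into the child device rather than keeping the caller's pointer; the same copy-two-resources pattern is repeated for tusb6010 and ux500 below. A generic sketch of the calling convention (the address, size and IRQ number are placeholders, not taken from the patch):

	struct resource res[] = {
		DEFINE_RES_MEM(0x4a0ab000, SZ_4K),	/* controller registers */
		DEFINE_RES_IRQ(92),			/* controller interrupt */
	};
	int ret;

	/* the table is copied into 'musb', so 'res' may go out of scope afterwards */
	ret = platform_device_add_resources(musb, res, ARRAY_SIZE(res));
	if (ret)
		dev_err(&pdev->dev, "failed to add resources\n");
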
index 7369ba33c94f1edc529c93dd03444c99fae5c464..2c06a8969a9f793153d6f6f0546be9d5eb1ff8a0 100644 (file)
@@ -1156,6 +1156,7 @@ static u64 tusb_dmamask = DMA_BIT_MASK(32);
 
 static int tusb_probe(struct platform_device *pdev)
 {
+       struct resource musb_resources[2];
        struct musb_hdrc_platform_data  *pdata = pdev->dev.platform_data;
        struct platform_device          *musb;
        struct tusb6010_glue            *glue;
@@ -1185,8 +1186,21 @@ static int tusb_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, glue);
 
-       ret = platform_device_add_resources(musb, pdev->resource,
-                       pdev->num_resources);
+       memset(musb_resources, 0x00, sizeof(*musb_resources) *
+                       ARRAY_SIZE(musb_resources));
+
+       musb_resources[0].name = pdev->resource[0].name;
+       musb_resources[0].start = pdev->resource[0].start;
+       musb_resources[0].end = pdev->resource[0].end;
+       musb_resources[0].flags = pdev->resource[0].flags;
+
+       musb_resources[1].name = pdev->resource[1].name;
+       musb_resources[1].start = pdev->resource[1].start;
+       musb_resources[1].end = pdev->resource[1].end;
+       musb_resources[1].flags = pdev->resource[1].flags;
+
+       ret = platform_device_add_resources(musb, musb_resources,
+                       ARRAY_SIZE(musb_resources));
        if (ret) {
                dev_err(&pdev->dev, "failed to add resources\n");
                goto err3;
index 2c80004e0a83029b52b7ea13f2d4c97194f40412..028ff4d07dc74a3346df56c6a677e4e8d95bbf9e 100644 (file)
@@ -189,6 +189,7 @@ static const struct musb_platform_ops ux500_ops = {
 
 static int ux500_probe(struct platform_device *pdev)
 {
+       struct resource musb_resources[2];
        struct musb_hdrc_platform_data  *pdata = pdev->dev.platform_data;
        struct platform_device          *musb;
        struct ux500_glue               *glue;
@@ -232,8 +233,21 @@ static int ux500_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, glue);
 
-       ret = platform_device_add_resources(musb, pdev->resource,
-                       pdev->num_resources);
+       memset(musb_resources, 0x00, sizeof(*musb_resources) *
+                       ARRAY_SIZE(musb_resources));
+
+       musb_resources[0].name = pdev->resource[0].name;
+       musb_resources[0].start = pdev->resource[0].start;
+       musb_resources[0].end = pdev->resource[0].end;
+       musb_resources[0].flags = pdev->resource[0].flags;
+
+       musb_resources[1].name = pdev->resource[1].name;
+       musb_resources[1].start = pdev->resource[1].start;
+       musb_resources[1].end = pdev->resource[1].end;
+       musb_resources[1].flags = pdev->resource[1].flags;
+
+       ret = platform_device_add_resources(musb, musb_resources,
+                       ARRAY_SIZE(musb_resources));
        if (ret) {
                dev_err(&pdev->dev, "failed to add resources\n");
                goto err5;
index 33812064114557c58a2db3bc9166b1bbe7d57f61..63e7c8a6b125df96cb2ed22b6b99fc70c5f60950 100644 (file)
@@ -71,8 +71,7 @@ static void ux500_dma_callback(void *private_data)
        spin_lock_irqsave(&musb->lock, flags);
        ux500_channel->channel.actual_len = ux500_channel->cur_len;
        ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
-       musb_dma_completion(musb, hw_ep->epnum,
-               ux500_channel->is_tx);
+       musb_dma_completion(musb, hw_ep->epnum, ux500_channel->is_tx);
        spin_unlock_irqrestore(&musb->lock, flags);
 
 }
@@ -366,7 +365,8 @@ void dma_controller_destroy(struct dma_controller *c)
        kfree(controller);
 }
 
-struct dma_controller *dma_controller_create(struct musb *musb, void __iomem *base)
+struct dma_controller *dma_controller_create(struct musb *musb,
+                                       void __iomem *base)
 {
        struct ux500_dma_controller *controller;
        struct platform_device *pdev = to_platform_device(musb->controller);
index 7ef3eb8617a6c7adc7775bc76a5fb0fd3d699ef8..a5a9552c25a1224876a51342a17f0af784e845c5 100644 (file)
@@ -4,11 +4,17 @@
 menuconfig USB_PHY
        bool "USB Physical Layer drivers"
        help
-         USB controllers (those which are host, device or DRD) need a
-         device to handle the physical layer signalling, commonly called
-         a PHY.
+         Most USB controllers have the physical layer signalling part
+         (commonly called a PHY) built in.  However, dual-role devices
+         (a.k.a. USB on-the-go) which support being USB master or slave
+         with the same connector often use an external PHY.
 
-         The following drivers add support for such PHY devices.
+         The drivers in this submenu add support for such PHY devices.
+         They are not needed for standard master-only (or the vast
+         majority of slave-only) USB interfaces.
+
+         If you're not sure if this applies to you, it probably doesn't;
+         say N here.
 
 if USB_PHY
 
@@ -86,7 +92,7 @@ config OMAP_USB3
          on/off the PHY.
 
 config SAMSUNG_USBPHY
-       tristate "Samsung USB PHY Driver"
+       tristate
        help
          Enable this to support Samsung USB phy helper driver for Samsung SoCs.
          This driver provides common interface to interact, for Samsung USB 2.0 PHY
index a9169cb1e6fcca8d85856b4414bb36d89df1239d..070eca3af18b756e416d5bc9475c18ee9aaa5460 100644 (file)
@@ -5,6 +5,7 @@
 ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
 
 obj-$(CONFIG_USB_PHY)                  += phy.o
+obj-$(CONFIG_OF)                       += of.o
 
 # transceiver drivers, keep the list sorted
 
diff --git a/drivers/usb/phy/of.c b/drivers/usb/phy/of.c
new file mode 100644 (file)
index 0000000..7ea0154
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * USB of helper code
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
+
+static const char *const usbphy_modes[] = {
+       [USBPHY_INTERFACE_MODE_UNKNOWN] = "",
+       [USBPHY_INTERFACE_MODE_UTMI]    = "utmi",
+       [USBPHY_INTERFACE_MODE_UTMIW]   = "utmi_wide",
+       [USBPHY_INTERFACE_MODE_ULPI]    = "ulpi",
+       [USBPHY_INTERFACE_MODE_SERIAL]  = "serial",
+       [USBPHY_INTERFACE_MODE_HSIC]    = "hsic",
+};
+
+/**
+ * of_usb_get_phy_mode - Get phy mode for given device_node
+ * @np:        Pointer to the given device_node
+ *
+ * The function gets phy interface string from property 'phy_type',
+ * and returns the corresponding enum usb_phy_interface
+ */
+enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np)
+{
+       const char *phy_type;
+       int err, i;
+
+       err = of_property_read_string(np, "phy_type", &phy_type);
+       if (err < 0)
+               return USBPHY_INTERFACE_MODE_UNKNOWN;
+
+       for (i = 0; i < ARRAY_SIZE(usbphy_modes); i++)
+               if (!strcmp(phy_type, usbphy_modes[i]))
+                       return i;
+
+       return USBPHY_INTERFACE_MODE_UNKNOWN;
+}
+EXPORT_SYMBOL_GPL(of_usb_get_phy_mode);
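
Editor's note: a typical caller of the new helper would be a controller or PHY driver reading its own device-tree node. The fragment below is only an illustrative, out-of-tree example, assuming a node that carries phy_type = "ulpi";:

	#include <linux/platform_device.h>
	#include <linux/usb/of.h>

	static int example_probe(struct platform_device *pdev)
	{
		enum usb_phy_interface mode;

		mode = of_usb_get_phy_mode(pdev->dev.of_node);
		if (mode == USBPHY_INTERFACE_MODE_UNKNOWN)
			dev_warn(&pdev->dev, "missing or unknown phy_type property\n");

		return 0;
	}
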
index e5eb1b5a04ebb815ffd46be5c1ab48b2fb8ed193..087402350b6d3f66b46212881c52d8fcd8b9f296 100644 (file)
@@ -1,10 +1,12 @@
 /*
  * drivers/usb/otg/ab8500_usb.c
  *
- * USB transceiver driver for AB8500 chip
+ * USB transceiver driver for AB8500 family chips
  *
- * Copyright (C) 2010 ST-Ericsson AB
+ * Copyright (C) 2010-2013 ST-Ericsson AB
  * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * Avinash Kumar <avinash.kumar@stericsson.com>
+ * Thirupathi Chippakurthy <thirupathi.chippakurthy@stericsson.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -29,6 +31,8 @@
 #include <linux/notifier.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/usb/musb-ux500.h>
 /* Bank AB8500_USB */
 #define AB8500_USB_LINE_STAT_REG 0x80
 #define AB8505_USB_LINE_STAT_REG 0x94
+#define AB8540_USB_LINK_STAT_REG 0x94
+#define AB9540_USB_LINK_STAT_REG 0x94
+#define AB8540_USB_OTG_CTL_REG 0x87
 #define AB8500_USB_PHY_CTRL_REG 0x8A
+#define AB8540_VBUS_CTRL_REG 0x82
 
 /* Bank AB8500_DEVELOPMENT */
 #define AB8500_BANK12_ACCESS 0x00
 
 /* Bank AB8500_DEBUG */
+#define AB8540_DEBUG 0x32
 #define AB8500_USB_PHY_TUNE1 0x05
 #define AB8500_USB_PHY_TUNE2 0x06
 #define AB8500_USB_PHY_TUNE3 0x07
 
+/* Bank AB8500_INTERRUPT */
+#define AB8500_IT_SOURCE2_REG 0x01
+
 #define AB8500_BIT_OTG_STAT_ID (1 << 0)
 #define AB8500_BIT_PHY_CTRL_HOST_EN (1 << 0)
 #define AB8500_BIT_PHY_CTRL_DEVICE_EN (1 << 1)
 #define AB8500_BIT_WD_CTRL_ENABLE (1 << 0)
 #define AB8500_BIT_WD_CTRL_KICK (1 << 1)
+#define AB8500_BIT_SOURCE2_VBUSDET (1 << 7)
+#define AB8540_BIT_OTG_CTL_VBUS_VALID_ENA (1 << 0)
+#define AB8540_BIT_OTG_CTL_ID_HOST_ENA (1 << 1)
+#define AB8540_BIT_OTG_CTL_ID_DEV_ENA (1 << 5)
+#define AB8540_BIT_VBUS_CTRL_CHARG_DET_ENA (1 << 0)
 
 #define AB8500_WD_KICK_DELAY_US 100 /* usec */
 #define AB8500_WD_V11_DISABLE_DELAY_US 100 /* usec */
@@ -112,6 +129,68 @@ enum ab8505_usb_link_status {
        USB_LINK_MOTOROLA_FACTORY_CBL_PHY_EN_8505,
 };
 
+enum ab8540_usb_link_status {
+       USB_LINK_NOT_CONFIGURED_8540 = 0,
+       USB_LINK_STD_HOST_NC_8540,
+       USB_LINK_STD_HOST_C_NS_8540,
+       USB_LINK_STD_HOST_C_S_8540,
+       USB_LINK_CDP_8540,
+       USB_LINK_RESERVED0_8540,
+       USB_LINK_RESERVED1_8540,
+       USB_LINK_DEDICATED_CHG_8540,
+       USB_LINK_ACA_RID_A_8540,
+       USB_LINK_ACA_RID_B_8540,
+       USB_LINK_ACA_RID_C_NM_8540,
+       USB_LINK_RESERVED2_8540,
+       USB_LINK_RESERVED3_8540,
+       USB_LINK_HM_IDGND_8540,
+       USB_LINK_CHARGERPORT_NOT_OK_8540,
+       USB_LINK_CHARGER_DM_HIGH_8540,
+       USB_LINK_PHYEN_NO_VBUS_NO_IDGND_8540,
+       USB_LINK_STD_UPSTREAM_NO_IDGNG_VBUS_8540,
+       USB_LINK_STD_UPSTREAM_8540,
+       USB_LINK_CHARGER_SE1_8540,
+       USB_LINK_CARKIT_CHGR_1_8540,
+       USB_LINK_CARKIT_CHGR_2_8540,
+       USB_LINK_ACA_DOCK_CHGR_8540,
+       USB_LINK_SAMSUNG_BOOT_CBL_PHY_EN_8540,
+       USB_LINK_SAMSUNG_BOOT_CBL_PHY_DISB_8540,
+       USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8540,
+       USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8540,
+       USB_LINK_MOTOROLA_FACTORY_CBL_PHY_EN_8540
+};
+
+enum ab9540_usb_link_status {
+       USB_LINK_NOT_CONFIGURED_9540 = 0,
+       USB_LINK_STD_HOST_NC_9540,
+       USB_LINK_STD_HOST_C_NS_9540,
+       USB_LINK_STD_HOST_C_S_9540,
+       USB_LINK_CDP_9540,
+       USB_LINK_RESERVED0_9540,
+       USB_LINK_RESERVED1_9540,
+       USB_LINK_DEDICATED_CHG_9540,
+       USB_LINK_ACA_RID_A_9540,
+       USB_LINK_ACA_RID_B_9540,
+       USB_LINK_ACA_RID_C_NM_9540,
+       USB_LINK_RESERVED2_9540,
+       USB_LINK_RESERVED3_9540,
+       USB_LINK_HM_IDGND_9540,
+       USB_LINK_CHARGERPORT_NOT_OK_9540,
+       USB_LINK_CHARGER_DM_HIGH_9540,
+       USB_LINK_PHYEN_NO_VBUS_NO_IDGND_9540,
+       USB_LINK_STD_UPSTREAM_NO_IDGNG_VBUS_9540,
+       USB_LINK_STD_UPSTREAM_9540,
+       USB_LINK_CHARGER_SE1_9540,
+       USB_LINK_CARKIT_CHGR_1_9540,
+       USB_LINK_CARKIT_CHGR_2_9540,
+       USB_LINK_ACA_DOCK_CHGR_9540,
+       USB_LINK_SAMSUNG_BOOT_CBL_PHY_EN_9540,
+       USB_LINK_SAMSUNG_BOOT_CBL_PHY_DISB_9540,
+       USB_LINK_SAMSUNG_UART_CBL_PHY_EN_9540,
+       USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_9540,
+       USB_LINK_MOTOROLA_FACTORY_CBL_PHY_EN_9540
+};
+
 enum ab8500_usb_mode {
        USB_IDLE = 0,
        USB_PERIPHERAL,
@@ -119,13 +198,30 @@ enum ab8500_usb_mode {
        USB_DEDICATED_CHG
 };
 
+/* Register USB_LINK_STATUS interrupt */
+#define AB8500_USB_FLAG_USE_LINK_STATUS_IRQ    (1 << 0)
+/* Register ID_WAKEUP_F interrupt */
+#define AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ      (1 << 1)
+/* Register VBUS_DET_F interrupt */
+#define AB8500_USB_FLAG_USE_VBUS_DET_IRQ       (1 << 2)
+/* Driver is using the ab-iddet driver */
+#define AB8500_USB_FLAG_USE_AB_IDDET           (1 << 3)
+/* Enable setting regulators voltage */
+#define AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE  (1 << 4)
+/* Enable the check_vbus_status workaround */
+#define AB8500_USB_FLAG_USE_CHECK_VBUS_STATUS  (1 << 5)
+/* Enable the vbus host workaround */
+#define AB8500_USB_FLAG_USE_VBUS_HOST_QUIRK    (1 << 6)
+
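
Editor's note: these capability flags replace scattered chip-revision checks — probe translates the AB variant into flags once, and the rest of the driver tests behaviour rather than chip type. A condensed view of what the patch does further down:

	/* probe: map the chip variant onto capability flags once */
	if (is_ab8540(ab->ab8500))
		ab->flags |= AB8500_USB_FLAG_USE_VBUS_HOST_QUIRK |
			     AB8500_USB_FLAG_USE_CHECK_VBUS_STATUS;

	/* elsewhere: test the behaviour bit, not the chip type */
	if (ab->flags & AB8500_USB_FLAG_USE_CHECK_VBUS_STATUS)
		if (ab8500_usb_check_vbus_status(ab))
			schedule_work(&ab->vbus_event_work);
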
 struct ab8500_usb {
        struct usb_phy phy;
        struct device *dev;
        struct ab8500 *ab8500;
        unsigned vbus_draw;
        struct work_struct phy_dis_work;
+       struct work_struct vbus_event_work;
        enum ab8500_usb_mode mode;
+       struct clk *sysclk;
        struct regulator *v_ape;
        struct regulator *v_musb;
        struct regulator *v_ulpi;
@@ -133,6 +229,8 @@ struct ab8500_usb {
        int previous_link_status_state;
        struct pinctrl *pinctrl;
        struct pinctrl_state *pins_sleep;
+       bool enabled_charging_detection;
+       unsigned int flags;
 };
 
 static inline struct ab8500_usb *phy_to_ab(struct usb_phy *x)
@@ -171,7 +269,7 @@ static void ab8500_usb_regulator_enable(struct ab8500_usb *ab)
        if (ret)
                dev_err(ab->dev, "Failed to enable v-ape\n");
 
-       if (!is_ab8500_2p0_or_earlier(ab->ab8500)) {
+       if (ab->flags & AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE) {
                ab->saved_v_ulpi = regulator_get_voltage(ab->v_ulpi);
                if (ab->saved_v_ulpi < 0)
                        dev_err(ab->dev, "Failed to get v_ulpi voltage\n");
@@ -191,7 +289,7 @@ static void ab8500_usb_regulator_enable(struct ab8500_usb *ab)
        if (ret)
                dev_err(ab->dev, "Failed to enable vddulpivio18\n");
 
-       if (!is_ab8500_2p0_or_earlier(ab->ab8500)) {
+       if (ab->flags & AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE) {
                volt = regulator_get_voltage(ab->v_ulpi);
                if ((volt != 1300000) && (volt != 1350000))
                        dev_err(ab->dev, "Vintcore is not set to 1.3V volt=%d\n",
@@ -212,7 +310,7 @@ static void ab8500_usb_regulator_disable(struct ab8500_usb *ab)
        regulator_disable(ab->v_ulpi);
 
        /* USB is not the only consumer of Vintcore, restore old settings */
-       if (!is_ab8500_2p0_or_earlier(ab->ab8500)) {
+       if (ab->flags & AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE) {
                if (ab->saved_v_ulpi > 0) {
                        ret = regulator_set_voltage(ab->v_ulpi,
                                        ab->saved_v_ulpi, ab->saved_v_ulpi);
@@ -252,11 +350,23 @@ static void ab8500_usb_phy_enable(struct ab8500_usb *ab, bool sel_host)
        if (IS_ERR(ab->pinctrl))
                dev_err(ab->dev, "could not get/set default pinstate\n");
 
+       if (clk_prepare_enable(ab->sysclk))
+               dev_err(ab->dev, "can't prepare/enable clock\n");
+
        ab8500_usb_regulator_enable(ab);
 
        abx500_mask_and_set_register_interruptible(ab->dev,
                        AB8500_USB, AB8500_USB_PHY_CTRL_REG,
                        bit, bit);
+
+       if (ab->flags & AB8500_USB_FLAG_USE_VBUS_HOST_QUIRK) {
+               if (sel_host)
+                       abx500_set_register_interruptible(ab->dev,
+                                       AB8500_USB, AB8540_USB_OTG_CTL_REG,
+                                       AB8540_BIT_OTG_CTL_VBUS_VALID_ENA |
+                                       AB8540_BIT_OTG_CTL_ID_HOST_ENA |
+                                       AB8540_BIT_OTG_CTL_ID_DEV_ENA);
+       }
 }
 
 static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host)
@@ -274,6 +384,8 @@ static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host)
        /* Needed to disable the phy.*/
        ab8500_usb_wd_workaround(ab);
 
+       clk_disable_unprepare(ab->sysclk);
+
        ab8500_usb_regulator_disable(ab);
 
        if (!IS_ERR(ab->pinctrl)) {
@@ -286,7 +398,8 @@ static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host)
                else if (pinctrl_select_state(ab->pinctrl, ab->pins_sleep))
                        dev_err(ab->dev, "could not set pins to sleep state\n");
 
-               /* as USB pins are shared with idddet, release them to allow
+               /*
+                * as USB pins are shared with iddet, release them to allow
                 * iddet to request them
                 */
                pinctrl_put(ab->pinctrl);
@@ -298,6 +411,254 @@ static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host)
 #define ab8500_usb_peri_phy_en(ab)     ab8500_usb_phy_enable(ab, false)
 #define ab8500_usb_peri_phy_dis(ab)    ab8500_usb_phy_disable(ab, false)
 
+static int ab9540_usb_link_status_update(struct ab8500_usb *ab,
+               enum ab9540_usb_link_status lsts)
+{
+       enum ux500_musb_vbus_id_status event = 0;
+
+       dev_dbg(ab->dev, "ab9540_usb_link_status_update %d\n", lsts);
+
+       if (ab->previous_link_status_state == USB_LINK_HM_IDGND_9540 &&
+                       (lsts == USB_LINK_STD_HOST_C_NS_9540 ||
+                        lsts == USB_LINK_STD_HOST_NC_9540))
+               return 0;
+
+       if (ab->previous_link_status_state == USB_LINK_ACA_RID_A_9540 &&
+                       (lsts == USB_LINK_STD_HOST_NC_9540))
+               return 0;
+
+       ab->previous_link_status_state = lsts;
+
+       switch (lsts) {
+       case USB_LINK_ACA_RID_B_9540:
+               event = UX500_MUSB_RIDB;
+       case USB_LINK_NOT_CONFIGURED_9540:
+       case USB_LINK_RESERVED0_9540:
+       case USB_LINK_RESERVED1_9540:
+       case USB_LINK_RESERVED2_9540:
+       case USB_LINK_RESERVED3_9540:
+               if (ab->mode == USB_PERIPHERAL)
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       UX500_MUSB_CLEAN, &ab->vbus_draw);
+               ab->mode = USB_IDLE;
+               ab->phy.otg->default_a = false;
+               ab->vbus_draw = 0;
+               if (event != UX500_MUSB_RIDB)
+                       event = UX500_MUSB_NONE;
+               /* Fallback to default B_IDLE as nothing is connected. */
+               ab->phy.state = OTG_STATE_B_IDLE;
+               break;
+
+       case USB_LINK_ACA_RID_C_NM_9540:
+               event = UX500_MUSB_RIDC;
+       case USB_LINK_STD_HOST_NC_9540:
+       case USB_LINK_STD_HOST_C_NS_9540:
+       case USB_LINK_STD_HOST_C_S_9540:
+       case USB_LINK_CDP_9540:
+               if (ab->mode == USB_HOST) {
+                       ab->mode = USB_PERIPHERAL;
+                       ab8500_usb_host_phy_dis(ab);
+                       ab8500_usb_peri_phy_en(ab);
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       UX500_MUSB_PREPARE, &ab->vbus_draw);
+               }
+               if (ab->mode == USB_IDLE) {
+                       ab->mode = USB_PERIPHERAL;
+                       ab8500_usb_peri_phy_en(ab);
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       UX500_MUSB_PREPARE, &ab->vbus_draw);
+               }
+               if (event != UX500_MUSB_RIDC)
+                       event = UX500_MUSB_VBUS;
+               break;
+
+       case USB_LINK_ACA_RID_A_9540:
+               event = UX500_MUSB_RIDA;
+       case USB_LINK_HM_IDGND_9540:
+       case USB_LINK_STD_UPSTREAM_9540:
+               if (ab->mode == USB_PERIPHERAL) {
+                       ab->mode = USB_HOST;
+                       ab8500_usb_peri_phy_dis(ab);
+                       ab8500_usb_host_phy_en(ab);
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       UX500_MUSB_PREPARE, &ab->vbus_draw);
+               }
+               if (ab->mode == USB_IDLE) {
+                       ab->mode = USB_HOST;
+                       ab8500_usb_host_phy_en(ab);
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       UX500_MUSB_PREPARE, &ab->vbus_draw);
+               }
+               ab->phy.otg->default_a = true;
+               if (event != UX500_MUSB_RIDA)
+                       event = UX500_MUSB_ID;
+
+               atomic_notifier_call_chain(&ab->phy.notifier,
+                               event, &ab->vbus_draw);
+               break;
+
+       case USB_LINK_DEDICATED_CHG_9540:
+               ab->mode = USB_DEDICATED_CHG;
+               event = UX500_MUSB_CHARGER;
+               atomic_notifier_call_chain(&ab->phy.notifier,
+                               event, &ab->vbus_draw);
+               break;
+
+       case USB_LINK_PHYEN_NO_VBUS_NO_IDGND_9540:
+       case USB_LINK_STD_UPSTREAM_NO_IDGNG_VBUS_9540:
+               if (!(is_ab9540_2p0_or_earlier(ab->ab8500))) {
+                       event = UX500_MUSB_NONE;
+                       if (ab->mode == USB_HOST) {
+                               ab->phy.otg->default_a = false;
+                               ab->vbus_draw = 0;
+                               atomic_notifier_call_chain(&ab->phy.notifier,
+                                               event, &ab->vbus_draw);
+                               ab8500_usb_host_phy_dis(ab);
+                               ab->mode = USB_IDLE;
+                       }
+                       if (ab->mode == USB_PERIPHERAL) {
+                               atomic_notifier_call_chain(&ab->phy.notifier,
+                                               event, &ab->vbus_draw);
+                               ab8500_usb_peri_phy_dis(ab);
+                               atomic_notifier_call_chain(&ab->phy.notifier,
+                                               UX500_MUSB_CLEAN,
+                                               &ab->vbus_draw);
+                               ab->mode = USB_IDLE;
+                               ab->phy.otg->default_a = false;
+                               ab->vbus_draw = 0;
+                       }
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int ab8540_usb_link_status_update(struct ab8500_usb *ab,
+               enum ab8540_usb_link_status lsts)
+{
+       enum ux500_musb_vbus_id_status event = 0;
+
+       dev_dbg(ab->dev, "ab8540_usb_link_status_update %d\n", lsts);
+
+       if (ab->enabled_charging_detection) {
+               /* Disable USB Charger detection */
+               abx500_mask_and_set_register_interruptible(ab->dev,
+                               AB8500_USB, AB8540_VBUS_CTRL_REG,
+                               AB8540_BIT_VBUS_CTRL_CHARG_DET_ENA, 0x00);
+               ab->enabled_charging_detection = false;
+       }
+
+       /*
+        * Spurious link_status interrupts are seen in case of a
+        * disconnection of a device in IDGND and RIDA stage
+        */
+       if (ab->previous_link_status_state == USB_LINK_HM_IDGND_8540 &&
+                       (lsts == USB_LINK_STD_HOST_C_NS_8540 ||
+                        lsts == USB_LINK_STD_HOST_NC_8540))
+               return 0;
+
+       if (ab->previous_link_status_state == USB_LINK_ACA_RID_A_8540 &&
+                       (lsts == USB_LINK_STD_HOST_NC_8540))
+               return 0;
+
+       ab->previous_link_status_state = lsts;
+
+       switch (lsts) {
+       case USB_LINK_ACA_RID_B_8540:
+               event = UX500_MUSB_RIDB;
+       case USB_LINK_NOT_CONFIGURED_8540:
+       case USB_LINK_RESERVED0_8540:
+       case USB_LINK_RESERVED1_8540:
+       case USB_LINK_RESERVED2_8540:
+       case USB_LINK_RESERVED3_8540:
+               ab->mode = USB_IDLE;
+               ab->phy.otg->default_a = false;
+               ab->vbus_draw = 0;
+               if (event != UX500_MUSB_RIDB)
+                       event = UX500_MUSB_NONE;
+               /*
+                * Fallback to default B_IDLE as nothing
+                * is connected
+                */
+               ab->phy.state = OTG_STATE_B_IDLE;
+               break;
+
+       case USB_LINK_ACA_RID_C_NM_8540:
+               event = UX500_MUSB_RIDC;
+       case USB_LINK_STD_HOST_NC_8540:
+       case USB_LINK_STD_HOST_C_NS_8540:
+       case USB_LINK_STD_HOST_C_S_8540:
+       case USB_LINK_CDP_8540:
+               if (ab->mode == USB_IDLE) {
+                       ab->mode = USB_PERIPHERAL;
+                       ab8500_usb_peri_phy_en(ab);
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       UX500_MUSB_PREPARE, &ab->vbus_draw);
+               }
+               if (event != UX500_MUSB_RIDC)
+                       event = UX500_MUSB_VBUS;
+               break;
+
+       case USB_LINK_ACA_RID_A_8540:
+       case USB_LINK_ACA_DOCK_CHGR_8540:
+               event = UX500_MUSB_RIDA;
+       case USB_LINK_HM_IDGND_8540:
+       case USB_LINK_STD_UPSTREAM_8540:
+               if (ab->mode == USB_IDLE) {
+                       ab->mode = USB_HOST;
+                       ab8500_usb_host_phy_en(ab);
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       UX500_MUSB_PREPARE, &ab->vbus_draw);
+               }
+               ab->phy.otg->default_a = true;
+               if (event != UX500_MUSB_RIDA)
+                       event = UX500_MUSB_ID;
+               atomic_notifier_call_chain(&ab->phy.notifier,
+                               event, &ab->vbus_draw);
+               break;
+
+       case USB_LINK_DEDICATED_CHG_8540:
+               ab->mode = USB_DEDICATED_CHG;
+               event = UX500_MUSB_CHARGER;
+               atomic_notifier_call_chain(&ab->phy.notifier,
+                               event, &ab->vbus_draw);
+               break;
+
+       case USB_LINK_PHYEN_NO_VBUS_NO_IDGND_8540:
+       case USB_LINK_STD_UPSTREAM_NO_IDGNG_VBUS_8540:
+               event = UX500_MUSB_NONE;
+               if (ab->mode == USB_HOST) {
+                       ab->phy.otg->default_a = false;
+                       ab->vbus_draw = 0;
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       event, &ab->vbus_draw);
+                       ab8500_usb_host_phy_dis(ab);
+                       ab->mode = USB_IDLE;
+               }
+               if (ab->mode == USB_PERIPHERAL) {
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       event, &ab->vbus_draw);
+                       ab8500_usb_peri_phy_dis(ab);
+                       atomic_notifier_call_chain(&ab->phy.notifier,
+                                       UX500_MUSB_CLEAN, &ab->vbus_draw);
+                       ab->mode = USB_IDLE;
+                       ab->phy.otg->default_a = false;
+                       ab->vbus_draw = 0;
+               }
+               break;
+
+       default:
+               event = UX500_MUSB_NONE;
+               break;
+       }
+
+       return 0;
+}
+
 static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
                enum ab8505_usb_link_status lsts)
 {
@@ -498,6 +859,20 @@ static int abx500_usb_link_status_update(struct ab8500_usb *ab)
                                AB8500_USB, AB8505_USB_LINE_STAT_REG, &reg);
                lsts = (reg >> 3) & 0x1F;
                ret = ab8505_usb_link_status_update(ab, lsts);
+       } else if (is_ab8540(ab->ab8500)) {
+               enum ab8540_usb_link_status lsts;
+
+               abx500_get_register_interruptible(ab->dev,
+                               AB8500_USB, AB8540_USB_LINK_STAT_REG, &reg);
+               lsts = (reg >> 3) & 0xFF;
+               ret = ab8540_usb_link_status_update(ab, lsts);
+       } else if (is_ab9540(ab->ab8500)) {
+               enum ab9540_usb_link_status lsts;
+
+               abx500_get_register_interruptible(ab->dev,
+                               AB8500_USB, AB9540_USB_LINK_STAT_REG, &reg);
+               lsts = (reg >> 3) & 0xFF;
+               ret = ab9540_usb_link_status_update(ab, lsts);
        }
 
        return ret;
@@ -553,7 +928,7 @@ static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data)
 
 static irqreturn_t ab8500_usb_link_status_irq(int irq, void *data)
 {
-       struct ab8500_usb *ab = (struct ab8500_usb *) data;
+       struct ab8500_usb *ab = (struct ab8500_usb *)data;
 
        abx500_usb_link_status_update(ab);
 
@@ -572,6 +947,69 @@ static void ab8500_usb_phy_disable_work(struct work_struct *work)
                ab8500_usb_peri_phy_dis(ab);
 }
 
+/* Check if VBUS is set and linkstatus has not detected a cable. */
+static bool ab8500_usb_check_vbus_status(struct ab8500_usb *ab)
+{
+       u8 isource2;
+       u8 reg;
+       enum ab8540_usb_link_status lsts;
+
+       abx500_get_register_interruptible(ab->dev,
+                       AB8500_INTERRUPT, AB8500_IT_SOURCE2_REG,
+                       &isource2);
+
+       /* If Vbus is below 3.6V abort */
+       if (!(isource2 & AB8500_BIT_SOURCE2_VBUSDET))
+               return false;
+
+       abx500_get_register_interruptible(ab->dev,
+                       AB8500_USB, AB8540_USB_LINK_STAT_REG,
+                       &reg);
+
+       lsts = (reg >> 3) & 0xFF;
+
+       /* Check if linkstatus has detected a cable */
+       if (lsts)
+               return false;
+
+       return true;
+}
+
+/* Re-trigger charger detection after kicking the main watchdog again. */
+static void ab8500_usb_vbus_turn_on_event_work(struct work_struct *work)
+{
+       struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
+                       vbus_event_work);
+
+       if (ab->mode != USB_IDLE)
+               return;
+
+       abx500_set_register_interruptible(ab->dev,
+                       AB8500_SYS_CTRL2_BLOCK, AB8500_MAIN_WD_CTRL_REG,
+                       AB8500_BIT_WD_CTRL_ENABLE);
+
+       udelay(100);
+
+       abx500_set_register_interruptible(ab->dev,
+                       AB8500_SYS_CTRL2_BLOCK, AB8500_MAIN_WD_CTRL_REG,
+                       AB8500_BIT_WD_CTRL_ENABLE | AB8500_BIT_WD_CTRL_KICK);
+
+       udelay(100);
+
+       /* Disable Main watchdog */
+       abx500_set_register_interruptible(ab->dev,
+                       AB8500_SYS_CTRL2_BLOCK, AB8500_MAIN_WD_CTRL_REG,
+                       0x0);
+
+       /* Enable USB Charger detection */
+       abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_USB, AB8540_VBUS_CTRL_REG,
+                       AB8540_BIT_VBUS_CTRL_CHARG_DET_ENA,
+                       AB8540_BIT_VBUS_CTRL_CHARG_DET_ENA);
+
+       ab->enabled_charging_detection = true;
+}
+
 static unsigned ab8500_eyediagram_workaroud(struct ab8500_usb *ab, unsigned mA)
 {
        /*
@@ -627,7 +1065,7 @@ static int ab8500_usb_set_peripheral(struct usb_otg *otg,
         * is fixed.
         */
 
-       if ((ab->mode != USB_IDLE) && (!gadget)) {
+       if ((ab->mode != USB_IDLE) && !gadget) {
                ab->mode = USB_IDLE;
                schedule_work(&ab->phy_dis_work);
        }
@@ -651,7 +1089,7 @@ static int ab8500_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
         * is fixed.
         */
 
-       if ((ab->mode != USB_IDLE) && (!host)) {
+       if ((ab->mode != USB_IDLE) && !host) {
                ab->mode = USB_IDLE;
                schedule_work(&ab->phy_dis_work);
        }
@@ -659,6 +1097,33 @@ static int ab8500_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
        return 0;
 }
 
+static void ab8500_usb_restart_phy(struct ab8500_usb *ab)
+{
+       abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_USB, AB8500_USB_PHY_CTRL_REG,
+                       AB8500_BIT_PHY_CTRL_DEVICE_EN,
+                       AB8500_BIT_PHY_CTRL_DEVICE_EN);
+
+       udelay(100);
+
+       abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_USB, AB8500_USB_PHY_CTRL_REG,
+                       AB8500_BIT_PHY_CTRL_DEVICE_EN,
+                       0);
+
+       abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_USB, AB8500_USB_PHY_CTRL_REG,
+                       AB8500_BIT_PHY_CTRL_HOST_EN,
+                       AB8500_BIT_PHY_CTRL_HOST_EN);
+
+       udelay(100);
+
+       abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_USB, AB8500_USB_PHY_CTRL_REG,
+                       AB8500_BIT_PHY_CTRL_HOST_EN,
+                       0);
+}
+
 static int ab8500_usb_regulator_get(struct ab8500_usb *ab)
 {
        int err;
@@ -693,48 +1158,197 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
        int err;
        int irq;
 
-       irq = platform_get_irq_byname(pdev, "USB_LINK_STATUS");
-       if (irq < 0) {
-               dev_err(&pdev->dev, "Link status irq not found\n");
-               return irq;
-       }
-       err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
-                       ab8500_usb_link_status_irq,
-                       IRQF_NO_SUSPEND | IRQF_SHARED, "usb-link-status", ab);
-       if (err < 0) {
-               dev_err(ab->dev, "request_irq failed for link status irq\n");
-               return err;
+       if (ab->flags & AB8500_USB_FLAG_USE_LINK_STATUS_IRQ) {
+               irq = platform_get_irq_byname(pdev, "USB_LINK_STATUS");
+               if (irq < 0) {
+                       dev_err(&pdev->dev, "Link status irq not found\n");
+                       return irq;
+               }
+               err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+                               ab8500_usb_link_status_irq,
+                               IRQF_NO_SUSPEND | IRQF_SHARED,
+                               "usb-link-status", ab);
+               if (err < 0) {
+                       dev_err(ab->dev, "request_irq failed for link status irq\n");
+                       return err;
+               }
        }
 
-       irq = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
-       if (irq < 0) {
-               dev_err(&pdev->dev, "ID fall irq not found\n");
-               return irq;
-       }
-       err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
-                       ab8500_usb_disconnect_irq,
-                       IRQF_NO_SUSPEND | IRQF_SHARED, "usb-id-fall", ab);
-       if (err < 0) {
-               dev_err(ab->dev, "request_irq failed for ID fall irq\n");
-               return err;
+       if (ab->flags & AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ) {
+               irq = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
+               if (irq < 0) {
+                       dev_err(&pdev->dev, "ID fall irq not found\n");
+                       return irq;
+               }
+               err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+                               ab8500_usb_disconnect_irq,
+                               IRQF_NO_SUSPEND | IRQF_SHARED,
+                               "usb-id-fall", ab);
+               if (err < 0) {
+                       dev_err(ab->dev, "request_irq failed for ID fall irq\n");
+                       return err;
+               }
        }
 
-       irq = platform_get_irq_byname(pdev, "VBUS_DET_F");
-       if (irq < 0) {
-               dev_err(&pdev->dev, "VBUS fall irq not found\n");
-               return irq;
-       }
-       err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
-                       ab8500_usb_disconnect_irq,
-                       IRQF_NO_SUSPEND | IRQF_SHARED, "usb-vbus-fall", ab);
-       if (err < 0) {
-               dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
-               return err;
+       if (ab->flags & AB8500_USB_FLAG_USE_VBUS_DET_IRQ) {
+               irq = platform_get_irq_byname(pdev, "VBUS_DET_F");
+               if (irq < 0) {
+                       dev_err(&pdev->dev, "VBUS fall irq not found\n");
+                       return irq;
+               }
+               err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+                               ab8500_usb_disconnect_irq,
+                               IRQF_NO_SUSPEND | IRQF_SHARED,
+                               "usb-vbus-fall", ab);
+               if (err < 0) {
+                       dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
+                       return err;
+               }
        }
 
        return 0;
 }
 
+static void ab8500_usb_set_ab8500_tuning_values(struct ab8500_usb *ab)
+{
+       int err;
+
+       /* Enable the PBT/Bank 0x12 access */
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x01);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to enable bank12 access err=%d\n",
+                               err);
+
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEBUG, AB8500_USB_PHY_TUNE1, 0xC8);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE1 register err=%d\n",
+                               err);
+
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEBUG, AB8500_USB_PHY_TUNE2, 0x00);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE2 register err=%d\n",
+                               err);
+
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEBUG, AB8500_USB_PHY_TUNE3, 0x78);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE3 register err=%d\n",
+                               err);
+
+       /* Switch to normal mode/disable Bank 0x12 access */
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x00);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to switch bank12 access err=%d\n",
+                               err);
+}
+
+static void ab8500_usb_set_ab8505_tuning_values(struct ab8500_usb *ab)
+{
+       int err;
+
+       /* Enable the PBT/Bank 0x12 access */
+       err = abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS,
+                       0x01, 0x01);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to enable bank12 access err=%d\n",
+                               err);
+
+       err = abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_DEBUG, AB8500_USB_PHY_TUNE1,
+                       0xC8, 0xC8);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE1 register err=%d\n",
+                               err);
+
+       err = abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_DEBUG, AB8500_USB_PHY_TUNE2,
+                       0x60, 0x60);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE2 register err=%d\n",
+                               err);
+
+       err = abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_DEBUG, AB8500_USB_PHY_TUNE3,
+                       0xFC, 0x80);
+
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE3 register err=%d\n",
+                               err);
+
+       /* Switch to normal mode/disable Bank 0x12 access */
+       err = abx500_mask_and_set_register_interruptible(ab->dev,
+                       AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS,
+                       0x00, 0x00);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to switch bank12 access err=%d\n",
+                               err);
+}
+
+static void ab8500_usb_set_ab8540_tuning_values(struct ab8500_usb *ab)
+{
+       int err;
+
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8540_DEBUG, AB8500_USB_PHY_TUNE1, 0xCC);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE1 register ret=%d\n",
+                               err);
+
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8540_DEBUG, AB8500_USB_PHY_TUNE2, 0x60);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE2 register ret=%d\n",
+                               err);
+
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8540_DEBUG, AB8500_USB_PHY_TUNE3, 0x90);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE3 register ret=%d\n",
+                               err);
+}
+
+static void ab8500_usb_set_ab9540_tuning_values(struct ab8500_usb *ab)
+{
+       int err;
+
+       /* Enable the PBT/Bank 0x12 access */
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x01);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to enable bank12 access err=%d\n",
+                               err);
+
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEBUG, AB8500_USB_PHY_TUNE1, 0xC8);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE1 register err=%d\n",
+                               err);
+
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEBUG, AB8500_USB_PHY_TUNE2, 0x60);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE2 register err=%d\n",
+                               err);
+
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEBUG, AB8500_USB_PHY_TUNE3, 0x80);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to set PHY_TUNE3 register err=%d\n",
+                               err);
+
+       /* Switch to normal mode/disable Bank 0x12 access */
+       err = abx500_set_register_interruptible(ab->dev,
+                       AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x00);
+       if (err < 0)
+               dev_err(ab->dev, "Failed to switch bank12 access err=%d\n",
+                               err);
+}
+
 static int ab8500_usb_probe(struct platform_device *pdev)
 {
        struct ab8500_usb       *ab;
@@ -772,6 +1386,33 @@ static int ab8500_usb_probe(struct platform_device *pdev)
        otg->set_host           = ab8500_usb_set_host;
        otg->set_peripheral     = ab8500_usb_set_peripheral;
 
+       if (is_ab8500(ab->ab8500)) {
+               ab->flags |= AB8500_USB_FLAG_USE_LINK_STATUS_IRQ |
+                       AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ |
+                       AB8500_USB_FLAG_USE_VBUS_DET_IRQ |
+                       AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE;
+       } else if (is_ab8505(ab->ab8500)) {
+               ab->flags |= AB8500_USB_FLAG_USE_LINK_STATUS_IRQ |
+                       AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ |
+                       AB8500_USB_FLAG_USE_VBUS_DET_IRQ |
+                       AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE;
+       } else if (is_ab8540(ab->ab8500)) {
+               ab->flags |= AB8500_USB_FLAG_USE_LINK_STATUS_IRQ |
+                       AB8500_USB_FLAG_USE_CHECK_VBUS_STATUS |
+                       AB8500_USB_FLAG_USE_VBUS_HOST_QUIRK |
+                       AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE;
+       } else if (is_ab9540(ab->ab8500)) {
+               ab->flags |= AB8500_USB_FLAG_USE_LINK_STATUS_IRQ |
+                       AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE;
+               if (is_ab9540_2p0_or_earlier(ab->ab8500))
+                       ab->flags |= AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ |
+                               AB8500_USB_FLAG_USE_VBUS_DET_IRQ;
+       }
+
+       /* Disable regulator voltage setting for AB8500 <= v2.0 */
+       if (is_ab8500_2p0_or_earlier(ab->ab8500))
+               ab->flags &= ~AB8500_USB_FLAG_REGULATOR_SET_VOLTAGE;
+
        platform_set_drvdata(pdev, ab);
 
        ATOMIC_INIT_NOTIFIER_HEAD(&ab->phy.notifier);
@@ -779,10 +1420,18 @@ static int ab8500_usb_probe(struct platform_device *pdev)
        /* all: Disable phy when called from set_host and set_peripheral */
        INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work);
 
+       INIT_WORK(&ab->vbus_event_work, ab8500_usb_vbus_turn_on_event_work);
+
        err = ab8500_usb_regulator_get(ab);
        if (err)
                return err;
 
+       ab->sysclk = devm_clk_get(ab->dev, "sysclk");
+       if (IS_ERR(ab->sysclk)) {
+               dev_err(ab->dev, "Could not get sysclk.\n");
+               return PTR_ERR(ab->sysclk);
+       }
+
        err = ab8500_usb_irq_setup(pdev, ab);
        if (err < 0)
                return err;
@@ -793,85 +1442,33 @@ static int ab8500_usb_probe(struct platform_device *pdev)
                return err;
        }
 
-       /* Phy tuning values for AB8500 */
-       if (!is_ab8500_2p0_or_earlier(ab->ab8500)) {
-               /* Enable the PBT/Bank 0x12 access */
-               err = abx500_set_register_interruptible(ab->dev,
-                               AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x01);
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to enable bank12 access err=%d\n",
-                                       err);
-
-               err = abx500_set_register_interruptible(ab->dev,
-                               AB8500_DEBUG, AB8500_USB_PHY_TUNE1, 0xC8);
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to set PHY_TUNE1 register err=%d\n",
-                                       err);
-
-               err = abx500_set_register_interruptible(ab->dev,
-                               AB8500_DEBUG, AB8500_USB_PHY_TUNE2, 0x00);
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to set PHY_TUNE2 register err=%d\n",
-                                       err);
-
-               err = abx500_set_register_interruptible(ab->dev,
-                               AB8500_DEBUG, AB8500_USB_PHY_TUNE3, 0x78);
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to set PHY_TUNE3 regester err=%d\n",
-                                       err);
-
-               /* Switch to normal mode/disable Bank 0x12 access */
-               err = abx500_set_register_interruptible(ab->dev,
-                               AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x00);
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to switch bank12 access err=%d\n",
-                                       err);
-       }
-
-       /* Phy tuning values for AB8505 */
-       if (is_ab8505(ab->ab8500)) {
-               /* Enable the PBT/Bank 0x12 access */
-               err = abx500_mask_and_set_register_interruptible(ab->dev,
-                               AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS,
-                               0x01, 0x01);
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to enable bank12 access err=%d\n",
-                                       err);
-
-               err = abx500_mask_and_set_register_interruptible(ab->dev,
-                               AB8500_DEBUG, AB8500_USB_PHY_TUNE1,
-                               0xC8, 0xC8);
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to set PHY_TUNE1 register err=%d\n",
-                                       err);
-
-               err = abx500_mask_and_set_register_interruptible(ab->dev,
-                               AB8500_DEBUG, AB8500_USB_PHY_TUNE2,
-                               0x60, 0x60);
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to set PHY_TUNE2 register err=%d\n",
-                                       err);
-
-               err = abx500_mask_and_set_register_interruptible(ab->dev,
-                               AB8500_DEBUG, AB8500_USB_PHY_TUNE3,
-                               0xFC, 0x80);
-
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to set PHY_TUNE3 regester err=%d\n",
-                                       err);
-
-               /* Switch to normal mode/disable Bank 0x12 access */
-               err = abx500_mask_and_set_register_interruptible(ab->dev,
-                               AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS,
-                               0x00, 0x00);
-               if (err < 0)
-                       dev_err(ab->dev, "Failed to switch bank12 access err=%d\n",
-                                       err);
-       }
+       if (is_ab8500(ab->ab8500) && !is_ab8500_2p0_or_earlier(ab->ab8500))
+               /* Phy tuning values for AB8500 > v2.0 */
+               ab8500_usb_set_ab8500_tuning_values(ab);
+       else if (is_ab8505(ab->ab8500))
+               /* Phy tuning values for AB8505 */
+               ab8500_usb_set_ab8505_tuning_values(ab);
+       else if (is_ab8540(ab->ab8500))
+               /* Phy tuning values for AB8540 */
+               ab8500_usb_set_ab8540_tuning_values(ab);
+       else if (is_ab9540(ab->ab8500))
+               /* Phy tuning values for AB9540 */
+               ab8500_usb_set_ab9540_tuning_values(ab);
 
        /* Needed to enable ID detection. */
        ab8500_usb_wd_workaround(ab);
 
+       /*
+        * This is required for usb-link-status to work properly when a
+        * cable is connected at boot time.
+        */
+       ab8500_usb_restart_phy(ab);
+
+       if (ab->flags & AB8500_USB_FLAG_USE_CHECK_VBUS_STATUS) {
+               if (ab8500_usb_check_vbus_status(ab))
+                       schedule_work(&ab->vbus_event_work);
+       }
+
        abx500_usb_link_status_update(ab);
 
        dev_info(&pdev->dev, "revision 0x%2x driver initialized\n", rev);
@@ -884,6 +1481,7 @@ static int ab8500_usb_remove(struct platform_device *pdev)
        struct ab8500_usb *ab = platform_get_drvdata(pdev);
 
        cancel_work_sync(&ab->phy_dis_work);
+       cancel_work_sync(&ab->vbus_event_work);
 
        usb_remove_phy(&ab->phy);
 
@@ -895,11 +1493,20 @@ static int ab8500_usb_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct platform_device_id ab8500_usb_devtype[] = {
+       { .name = "ab8500-usb", },
+       { .name = "ab8540-usb", },
+       { .name = "ab9540-usb", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, ab8500_usb_devtype);
+
 static struct platform_driver ab8500_usb_driver = {
        .probe          = ab8500_usb_probe,
        .remove         = ab8500_usb_remove,
+       .id_table       = ab8500_usb_devtype,
        .driver         = {
-               .name   = "ab8500-usb",
+               .name   = "abx5x0-usb",
                .owner  = THIS_MODULE,
        },
 };
@@ -916,7 +1523,6 @@ static void __exit ab8500_usb_exit(void)
 }
 module_exit(ab8500_usb_exit);
 
-MODULE_ALIAS("platform:ab8500_usb");
 MODULE_AUTHOR("ST-Ericsson AB");
-MODULE_DESCRIPTION("AB8500 usb transceiver driver");
+MODULE_DESCRIPTION("AB8500 family usb transceiver driver");
 MODULE_LICENSE("GPL");
index 638cc5dade35227732e77e6c5ccb0d769eb47d8b..55445e5d72e597c4476ff62d0c471ce3fb9433a3 100644 (file)
@@ -270,7 +270,7 @@ static struct platform_driver nop_usb_xceiv_driver = {
        .driver         = {
                .name   = "nop_usb_xceiv",
                .owner  = THIS_MODULE,
-               .of_match_table = of_match_ptr(nop_xceiv_dt_ids),
+               .of_match_table = nop_xceiv_dt_ids,
        },
 };
 
index a6e60b1e102e84c070a44e139d8e9ac2b1ff01c7..efe6e1464f45b59fb831a9ca5c05361593b3b7a5 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/delay.h>
 #include <linux/usb/omap_control_usb.h>
 
-#define        NUM_SYS_CLKS            5
+#define        NUM_SYS_CLKS            6
 #define        PLL_STATUS              0x00000004
 #define        PLL_GO                  0x00000008
 #define        PLL_CONFIGURATION1      0x0000000C
@@ -62,6 +62,7 @@ enum sys_clk_rate {
        CLK_RATE_12MHZ,
        CLK_RATE_16MHZ,
        CLK_RATE_19MHZ,
+       CLK_RATE_20MHZ,
        CLK_RATE_26MHZ,
        CLK_RATE_38MHZ
 };
@@ -72,6 +73,8 @@ static struct usb_dpll_params omap_usb3_dpll_params[NUM_SYS_CLKS] = {
        {1172, 8, 4, 20, 65537},        /* 19.2 MHz */
        {1250, 12, 4, 20, 0},           /* 26 MHz */
        {3125, 47, 4, 20, 92843},       /* 38.4 MHz */
+       {1000, 7, 4, 10, 0},            /* 20 MHz */
+
 };
 
 static int omap_usb3_suspend(struct usb_phy *x, int suspend)
@@ -122,6 +125,8 @@ static inline enum sys_clk_rate __get_sys_clk_index(unsigned long rate)
                return CLK_RATE_16MHZ;
        case 19200000:
                return CLK_RATE_19MHZ;
+       case 20000000:
+               return CLK_RATE_20MHZ;
        case 26000000:
                return CLK_RATE_26MHZ;
        case 38400000:
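The 20 MHz support touches two places because omap_usb3_dpll_params[] is indexed directly by the enum value that __get_sys_clk_index() returns, so the rows in the array have to follow the enum order (worth noting, since the new 20 MHz row is appended after the 38.4 MHz entry in this hunk while CLK_RATE_20MHZ is inserted before CLK_RATE_26MHZ). A sketch of the lookup, assuming the helper returns a negative value for unsupported rates (its default case is not shown here) and that the reference clock is available as a struct clk:

/* Illustrative sketch, not part of the patch. */
static struct usb_dpll_params *omap_usb3_pick_dpll_params(struct clk *sys_clk)
{
        enum sys_clk_rate idx = __get_sys_clk_index(clk_get_rate(sys_clk));

        if ((int)idx < 0 || idx >= NUM_SYS_CLKS)
                return NULL;    /* unsupported reference clock rate */

        return &omap_usb3_dpll_params[idx];
}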
index a35681b0c5014fdbaa44cbbf97facb2f2ec74117..23c3dd30b2f0e654437ff415ab562ecb9c42510f 100644 (file)
@@ -161,7 +161,7 @@ static int rcar_usb_phy_probe(struct platform_device *pdev)
         * CAUTION
         *
         * Because this phy address is also mapped under OHCI/EHCI address area,
-        * this driver can't use devm_request_and_ioremap(dev, res) here
+        * this driver can't use devm_ioremap_resource(dev, res) here
         */
        reg0 = devm_ioremap_nocache(dev, res0->start, resource_size(res0));
        reg1 = devm_ioremap_nocache(dev, res1->start, resource_size(res1));
index 7b118ee5f5e4aee01b563e39d0b06def9c9463b0..ac025ca08425eec5d912bd228f59d73bc2b40b4f 100644 (file)
@@ -73,7 +73,7 @@ EXPORT_SYMBOL_GPL(samsung_usbphy_parse_dt);
  * Here 'on = true' would mean USB PHY block is isolated, hence
  * de-activated and vice-versa.
  */
-void samsung_usbphy_set_isolation(struct samsung_usbphy *sphy, bool on)
+void samsung_usbphy_set_isolation_4210(struct samsung_usbphy *sphy, bool on)
 {
        void __iomem *reg = NULL;
        u32 reg_val;
@@ -84,32 +84,12 @@ void samsung_usbphy_set_isolation(struct samsung_usbphy *sphy, bool on)
                return;
        }
 
-       switch (sphy->drv_data->cpu_type) {
-       case TYPE_S3C64XX:
-               /*
-                * Do nothing: We will add here once S3C64xx goes for DT support
-                */
-               break;
-       case TYPE_EXYNOS4210:
-               /*
-                * Fall through since exynos4210 and exynos5250 have similar
-                * register architecture: two separate registers for host and
-                * device phy control with enable bit at position 0.
-                */
-       case TYPE_EXYNOS5250:
-               if (sphy->phy_type == USB_PHY_TYPE_DEVICE) {
-                       reg = sphy->pmuregs +
-                               sphy->drv_data->devphy_reg_offset;
-                       en_mask = sphy->drv_data->devphy_en_mask;
-               } else if (sphy->phy_type == USB_PHY_TYPE_HOST) {
-                       reg = sphy->pmuregs +
-                               sphy->drv_data->hostphy_reg_offset;
-                       en_mask = sphy->drv_data->hostphy_en_mask;
-               }
-               break;
-       default:
-               dev_err(sphy->dev, "Invalid SoC type\n");
-               return;
+       if (sphy->phy_type == USB_PHY_TYPE_DEVICE) {
+               reg = sphy->pmuregs + sphy->drv_data->devphy_reg_offset;
+               en_mask = sphy->drv_data->devphy_en_mask;
+       } else if (sphy->phy_type == USB_PHY_TYPE_HOST) {
+               reg = sphy->pmuregs + sphy->drv_data->hostphy_reg_offset;
+               en_mask = sphy->drv_data->hostphy_en_mask;
        }
 
        reg_val = readl(reg);
@@ -120,8 +100,13 @@ void samsung_usbphy_set_isolation(struct samsung_usbphy *sphy, bool on)
                reg_val |= en_mask;
 
        writel(reg_val, reg);
+
+       if (sphy->drv_data->cpu_type == TYPE_EXYNOS4X12) {
+               writel(reg_val, sphy->pmuregs + EXYNOS4X12_PHY_HSIC_CTRL0);
+               writel(reg_val, sphy->pmuregs + EXYNOS4X12_PHY_HSIC_CTRL1);
+       }
 }
-EXPORT_SYMBOL_GPL(samsung_usbphy_set_isolation);
+EXPORT_SYMBOL_GPL(samsung_usbphy_set_isolation_4210);
 
 /*
  * Configure the mode of working of usb-phy here: HOST/DEVICE.
@@ -162,73 +147,93 @@ int samsung_usbphy_set_type(struct usb_phy *phy,
 }
 EXPORT_SYMBOL_GPL(samsung_usbphy_set_type);
 
+int samsung_usbphy_rate_to_clksel_64xx(struct samsung_usbphy *sphy,
+                                                       unsigned long rate)
+{
+       unsigned int clksel;
+
+       switch (rate) {
+       case 12 * MHZ:
+               clksel = PHYCLK_CLKSEL_12M;
+               break;
+       case 24 * MHZ:
+               clksel = PHYCLK_CLKSEL_24M;
+               break;
+       case 48 * MHZ:
+               clksel = PHYCLK_CLKSEL_48M;
+               break;
+       default:
+               dev_err(sphy->dev,
+                       "Invalid reference clock frequency: %lu\n", rate);
+               return -EINVAL;
+       }
+
+       return clksel;
+}
+EXPORT_SYMBOL_GPL(samsung_usbphy_rate_to_clksel_64xx);
+
+int samsung_usbphy_rate_to_clksel_4x12(struct samsung_usbphy *sphy,
+                                                       unsigned long rate)
+{
+       unsigned int clksel;
+
+       switch (rate) {
+       case 9600 * KHZ:
+               clksel = FSEL_CLKSEL_9600K;
+               break;
+       case 10 * MHZ:
+               clksel = FSEL_CLKSEL_10M;
+               break;
+       case 12 * MHZ:
+               clksel = FSEL_CLKSEL_12M;
+               break;
+       case 19200 * KHZ:
+               clksel = FSEL_CLKSEL_19200K;
+               break;
+       case 20 * MHZ:
+               clksel = FSEL_CLKSEL_20M;
+               break;
+       case 24 * MHZ:
+               clksel = FSEL_CLKSEL_24M;
+               break;
+       case 50 * MHZ:
+               clksel = FSEL_CLKSEL_50M;
+               break;
+       default:
+               dev_err(sphy->dev,
+                       "Invalid reference clock frequency: %lu\n", rate);
+               return -EINVAL;
+       }
+
+       return clksel;
+}
+EXPORT_SYMBOL_GPL(samsung_usbphy_rate_to_clksel_4x12);
+
 /*
  * Returns reference clock frequency selection value
  */
 int samsung_usbphy_get_refclk_freq(struct samsung_usbphy *sphy)
 {
        struct clk *ref_clk;
-       int refclk_freq = 0;
+       unsigned long rate;
+       int refclk_freq;
 
        /*
         * In exynos5250 USB host and device PHY use
         * external crystal clock XXTI
         */
        if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250)
-               ref_clk = devm_clk_get(sphy->dev, "ext_xtal");
+               ref_clk = clk_get(sphy->dev, "ext_xtal");
        else
-               ref_clk = devm_clk_get(sphy->dev, "xusbxti");
+               ref_clk = clk_get(sphy->dev, "xusbxti");
        if (IS_ERR(ref_clk)) {
                dev_err(sphy->dev, "Failed to get reference clock\n");
                return PTR_ERR(ref_clk);
        }
 
-       if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250) {
-               /* set clock frequency for PLL */
-               switch (clk_get_rate(ref_clk)) {
-               case 9600 * KHZ:
-                       refclk_freq = FSEL_CLKSEL_9600K;
-                       break;
-               case 10 * MHZ:
-                       refclk_freq = FSEL_CLKSEL_10M;
-                       break;
-               case 12 * MHZ:
-                       refclk_freq = FSEL_CLKSEL_12M;
-                       break;
-               case 19200 * KHZ:
-                       refclk_freq = FSEL_CLKSEL_19200K;
-                       break;
-               case 20 * MHZ:
-                       refclk_freq = FSEL_CLKSEL_20M;
-                       break;
-               case 50 * MHZ:
-                       refclk_freq = FSEL_CLKSEL_50M;
-                       break;
-               case 24 * MHZ:
-               default:
-                       /* default reference clock */
-                       refclk_freq = FSEL_CLKSEL_24M;
-                       break;
-               }
-       } else {
-               switch (clk_get_rate(ref_clk)) {
-               case 12 * MHZ:
-                       refclk_freq = PHYCLK_CLKSEL_12M;
-                       break;
-               case 24 * MHZ:
-                       refclk_freq = PHYCLK_CLKSEL_24M;
-                       break;
-               case 48 * MHZ:
-                       refclk_freq = PHYCLK_CLKSEL_48M;
-                       break;
-               default:
-                       if (sphy->drv_data->cpu_type == TYPE_S3C64XX)
-                               refclk_freq = PHYCLK_CLKSEL_48M;
-                       else
-                               refclk_freq = PHYCLK_CLKSEL_24M;
-                       break;
-               }
-       }
+       rate = clk_get_rate(ref_clk);
+       refclk_freq = sphy->drv_data->rate_to_clksel(sphy, rate);
+
        clk_put(ref_clk);
 
        return refclk_freq;
index 70a9cae5e37fd0dbf6c53e4d67a593c983012940..68771bfd18253df19dd2f999086611d0314d4b90 100644 (file)
 #define RSTCON_HLINK_SWRST                     (0x1 << 1)
 #define RSTCON_SWRST                           (0x1 << 0)
 
+/* EXYNOS4X12 */
+#define EXYNOS4X12_PHY_HSIC_CTRL0              (0x04)
+#define EXYNOS4X12_PHY_HSIC_CTRL1              (0x08)
+
+#define PHYPWR_NORMAL_MASK_HSIC1               (0x7 << 12)
+#define PHYPWR_NORMAL_MASK_HSIC0               (0x7 << 9)
+#define PHYPWR_NORMAL_MASK_PHY1                        (0x7 << 6)
+
+#define RSTCON_HOSTPHY_SWRST                   (0xf << 3)
+
 /* EXYNOS5 */
 #define EXYNOS5_PHY_HOST_CTRL0                 (0x00)
 
 enum samsung_cpu_type {
        TYPE_S3C64XX,
        TYPE_EXYNOS4210,
+       TYPE_EXYNOS4X12,
        TYPE_EXYNOS5250,
 };
 
+struct samsung_usbphy;
+
 /*
  * struct samsung_usbphy_drvdata - driver data for various SoC variants
  * @cpu_type: machine identifier
@@ -268,6 +281,10 @@ struct samsung_usbphy_drvdata {
        int hostphy_en_mask;
        u32 devphy_reg_offset;
        u32 hostphy_reg_offset;
+       int (*rate_to_clksel)(struct samsung_usbphy *, unsigned long);
+       void (*set_isolation)(struct samsung_usbphy *, bool);
+       void (*phy_enable)(struct samsung_usbphy *);
+       void (*phy_disable)(struct samsung_usbphy *);
 };
 
 /*
@@ -320,8 +337,13 @@ static inline const struct samsung_usbphy_drvdata
 }
 
 extern int samsung_usbphy_parse_dt(struct samsung_usbphy *sphy);
-extern void samsung_usbphy_set_isolation(struct samsung_usbphy *sphy, bool on);
+extern void samsung_usbphy_set_isolation_4210(struct samsung_usbphy *sphy,
+                                                               bool on);
 extern void samsung_usbphy_cfg_sel(struct samsung_usbphy *sphy);
 extern int samsung_usbphy_set_type(struct usb_phy *phy,
                                        enum samsung_usb_phy_type phy_type);
 extern int samsung_usbphy_get_refclk_freq(struct samsung_usbphy *sphy);
+extern int samsung_usbphy_rate_to_clksel_64xx(struct samsung_usbphy *sphy,
+                                                       unsigned long rate);
+extern int samsung_usbphy_rate_to_clksel_4x12(struct samsung_usbphy *sphy,
+                                                       unsigned long rate);
index 9d5e273abcc7652f1b3dbe17992720a170124cdf..1011c16ade7e4b2de81db46d0ab8ddfc599b24bc 100644 (file)
@@ -176,6 +176,11 @@ static void samsung_usb2phy_enable(struct samsung_usbphy *sphy)
                phypwr &= ~PHYPWR_NORMAL_MASK;
                rstcon |= RSTCON_SWRST;
                break;
+       case TYPE_EXYNOS4X12:
+               phypwr &= ~(PHYPWR_NORMAL_MASK_HSIC0 |
+                               PHYPWR_NORMAL_MASK_HSIC1 |
+                               PHYPWR_NORMAL_MASK_PHY1);
+               rstcon |= RSTCON_HOSTPHY_SWRST;
        case TYPE_EXYNOS4210:
                phypwr &= ~PHYPWR_NORMAL_MASK_PHY0;
                rstcon |= RSTCON_SWRST;
@@ -189,6 +194,8 @@ static void samsung_usb2phy_enable(struct samsung_usbphy *sphy)
        /* reset all ports of PHY and Link */
        writel(rstcon, regs + SAMSUNG_RSTCON);
        udelay(10);
+       if (sphy->drv_data->cpu_type == TYPE_EXYNOS4X12)
+               rstcon &= ~RSTCON_HOSTPHY_SWRST;
        rstcon &= ~RSTCON_SWRST;
        writel(rstcon, regs + SAMSUNG_RSTCON);
 }
@@ -239,6 +246,10 @@ static void samsung_usb2phy_disable(struct samsung_usbphy *sphy)
        case TYPE_S3C64XX:
                phypwr |= PHYPWR_NORMAL_MASK;
                break;
+       case TYPE_EXYNOS4X12:
+               phypwr |= (PHYPWR_NORMAL_MASK_HSIC0 |
+                               PHYPWR_NORMAL_MASK_HSIC1 |
+                               PHYPWR_NORMAL_MASK_PHY1);
        case TYPE_EXYNOS4210:
                phypwr |= PHYPWR_NORMAL_MASK_PHY0;
        default:
@@ -284,17 +295,14 @@ static int samsung_usb2phy_init(struct usb_phy *phy)
        /* Disable phy isolation */
        if (sphy->plat && sphy->plat->pmu_isolation)
                sphy->plat->pmu_isolation(false);
-       else
-               samsung_usbphy_set_isolation(sphy, false);
+       else if (sphy->drv_data->set_isolation)
+               sphy->drv_data->set_isolation(sphy, false);
 
        /* Selecting Host/OTG mode; After reset USB2.0PHY_CFG: HOST */
        samsung_usbphy_cfg_sel(sphy);
 
        /* Initialize usb phy registers */
-       if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250)
-               samsung_exynos5_usb2phy_enable(sphy);
-       else
-               samsung_usb2phy_enable(sphy);
+       sphy->drv_data->phy_enable(sphy);
 
        spin_unlock_irqrestore(&sphy->lock, flags);
 
@@ -334,16 +342,13 @@ static void samsung_usb2phy_shutdown(struct usb_phy *phy)
        }
 
        /* De-initialize usb phy registers */
-       if (sphy->drv_data->cpu_type == TYPE_EXYNOS5250)
-               samsung_exynos5_usb2phy_disable(sphy);
-       else
-               samsung_usb2phy_disable(sphy);
+       sphy->drv_data->phy_disable(sphy);
 
        /* Enable phy isolation */
        if (sphy->plat && sphy->plat->pmu_isolation)
                sphy->plat->pmu_isolation(true);
-       else
-               samsung_usbphy_set_isolation(sphy, true);
+       else if (sphy->drv_data->set_isolation)
+               sphy->drv_data->set_isolation(sphy, true);
 
        spin_unlock_irqrestore(&sphy->lock, flags);
 
@@ -408,7 +413,10 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
        sphy->phy.label         = "samsung-usb2phy";
        sphy->phy.init          = samsung_usb2phy_init;
        sphy->phy.shutdown      = samsung_usb2phy_shutdown;
-       sphy->ref_clk_freq      = samsung_usbphy_get_refclk_freq(sphy);
+
+       sphy->ref_clk_freq = samsung_usbphy_get_refclk_freq(sphy);
+       if (sphy->ref_clk_freq < 0)
+               return -EINVAL;
 
        sphy->phy.otg           = otg;
        sphy->phy.otg->phy      = &sphy->phy;
@@ -438,18 +446,40 @@ static int samsung_usb2phy_remove(struct platform_device *pdev)
 static const struct samsung_usbphy_drvdata usb2phy_s3c64xx = {
        .cpu_type               = TYPE_S3C64XX,
        .devphy_en_mask         = S3C64XX_USBPHY_ENABLE,
+       .rate_to_clksel         = samsung_usbphy_rate_to_clksel_64xx,
+       .set_isolation          = NULL, /* TODO */
+       .phy_enable             = samsung_usb2phy_enable,
+       .phy_disable            = samsung_usb2phy_disable,
 };
 
 static const struct samsung_usbphy_drvdata usb2phy_exynos4 = {
        .cpu_type               = TYPE_EXYNOS4210,
        .devphy_en_mask         = EXYNOS_USBPHY_ENABLE,
        .hostphy_en_mask        = EXYNOS_USBPHY_ENABLE,
+       .rate_to_clksel         = samsung_usbphy_rate_to_clksel_64xx,
+       .set_isolation          = samsung_usbphy_set_isolation_4210,
+       .phy_enable             = samsung_usb2phy_enable,
+       .phy_disable            = samsung_usb2phy_disable,
+};
+
+static const struct samsung_usbphy_drvdata usb2phy_exynos4x12 = {
+       .cpu_type               = TYPE_EXYNOS4X12,
+       .devphy_en_mask         = EXYNOS_USBPHY_ENABLE,
+       .hostphy_en_mask        = EXYNOS_USBPHY_ENABLE,
+       .rate_to_clksel         = samsung_usbphy_rate_to_clksel_4x12,
+       .set_isolation          = samsung_usbphy_set_isolation_4210,
+       .phy_enable             = samsung_usb2phy_enable,
+       .phy_disable            = samsung_usb2phy_disable,
 };
 
 static struct samsung_usbphy_drvdata usb2phy_exynos5 = {
        .cpu_type               = TYPE_EXYNOS5250,
        .hostphy_en_mask        = EXYNOS_USBPHY_ENABLE,
        .hostphy_reg_offset     = EXYNOS_USBHOST_PHY_CTRL_OFFSET,
+       .rate_to_clksel         = samsung_usbphy_rate_to_clksel_4x12,
+       .set_isolation          = samsung_usbphy_set_isolation_4210,
+       .phy_enable             = samsung_exynos5_usb2phy_enable,
+       .phy_disable            = samsung_exynos5_usb2phy_disable,
 };
 
 #ifdef CONFIG_OF
@@ -460,6 +490,9 @@ static const struct of_device_id samsung_usbphy_dt_match[] = {
        }, {
                .compatible = "samsung,exynos4210-usb2phy",
                .data = &usb2phy_exynos4,
+       }, {
+               .compatible = "samsung,exynos4x12-usb2phy",
+               .data = &usb2phy_exynos4x12,
        }, {
                .compatible = "samsung,exynos5250-usb2phy",
                .data = &usb2phy_exynos5
@@ -476,6 +509,9 @@ static struct platform_device_id samsung_usbphy_driver_ids[] = {
        }, {
                .name           = "exynos4210-usb2phy",
                .driver_data    = (unsigned long)&usb2phy_exynos4,
+       }, {
+               .name           = "exynos4x12-usb2phy",
+               .driver_data    = (unsigned long)&usb2phy_exynos4x12,
        }, {
                .name           = "exynos5250-usb2phy",
                .driver_data    = (unsigned long)&usb2phy_exynos5,
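The new exynos4x12 entries above are resolved through samsung_usbphy_get_driver_data(), whose body lives in the header (only its signature appears in this excerpt). A sketch of what such a helper typically looks like, i.e. how the OF match data or the platform id driver_data is turned back into a struct samsung_usbphy_drvdata pointer; the body below is an assumption, not copied from the patch:

/* Illustrative sketch of the drvdata lookup. */
static inline const struct samsung_usbphy_drvdata *
samsung_usbphy_get_driver_data_sketch(struct platform_device *pdev)
{
        if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
                const struct of_device_id *match;

                match = of_match_node(samsung_usbphy_dt_match,
                                      pdev->dev.of_node);
                return match ? match->data : NULL;
        }

        return (const struct samsung_usbphy_drvdata *)
                        platform_get_device_id(pdev)->driver_data;
}

Once the drvdata is in place, probe and the init/shutdown paths only ever call the rate_to_clksel, set_isolation, phy_enable and phy_disable hooks, so adding the exynos4x12 variant did not require touching those call sites.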
index 5a9efcbcb532cc7ecf7d118b0767db0b970b7960..300e0cf5e31f4fdb7ada4034237750293335589d 100644 (file)
@@ -65,7 +65,7 @@ static u32 samsung_usb3phy_set_refclk(struct samsung_usbphy *sphy)
        return reg;
 }
 
-static int samsung_exynos5_usb3phy_enable(struct samsung_usbphy *sphy)
+static void samsung_exynos5_usb3phy_enable(struct samsung_usbphy *sphy)
 {
        void __iomem *regs = sphy->regs;
        u32 phyparam0;
@@ -133,8 +133,6 @@ static int samsung_exynos5_usb3phy_enable(struct samsung_usbphy *sphy)
 
        phyclkrst &= ~(PHYCLKRST_PORTRESET);
        writel(phyclkrst, regs + EXYNOS5_DRD_PHYCLKRST);
-
-       return 0;
 }
 
 static void samsung_exynos5_usb3phy_disable(struct samsung_usbphy *sphy)
@@ -184,10 +182,11 @@ static int samsung_usb3phy_init(struct usb_phy *phy)
        samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_DEVICE);
 
        /* Disable phy isolation */
-       samsung_usbphy_set_isolation(sphy, false);
+       if (sphy->drv_data->set_isolation)
+               sphy->drv_data->set_isolation(sphy, false);
 
        /* Initialize usb phy registers */
-       samsung_exynos5_usb3phy_enable(sphy);
+       sphy->drv_data->phy_enable(sphy);
 
        spin_unlock_irqrestore(&sphy->lock, flags);
 
@@ -218,10 +217,11 @@ static void samsung_usb3phy_shutdown(struct usb_phy *phy)
        samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_DEVICE);
 
        /* De-initialize usb phy registers */
-       samsung_exynos5_usb3phy_disable(sphy);
+       sphy->drv_data->phy_disable(sphy);
 
        /* Enable phy isolation */
-       samsung_usbphy_set_isolation(sphy, true);
+       if (sphy->drv_data->set_isolation)
+               sphy->drv_data->set_isolation(sphy, true);
 
        spin_unlock_irqrestore(&sphy->lock, flags);
 
@@ -274,7 +274,10 @@ static int samsung_usb3phy_probe(struct platform_device *pdev)
        sphy->phy.init          = samsung_usb3phy_init;
        sphy->phy.shutdown      = samsung_usb3phy_shutdown;
        sphy->drv_data          = samsung_usbphy_get_driver_data(pdev);
-       sphy->ref_clk_freq      = samsung_usbphy_get_refclk_freq(sphy);
+
+       sphy->ref_clk_freq = samsung_usbphy_get_refclk_freq(sphy);
+       if (sphy->ref_clk_freq < 0)
+               return -EINVAL;
 
        spin_lock_init(&sphy->lock);
 
@@ -300,6 +303,10 @@ static int samsung_usb3phy_remove(struct platform_device *pdev)
 static struct samsung_usbphy_drvdata usb3phy_exynos5 = {
        .cpu_type               = TYPE_EXYNOS5250,
        .devphy_en_mask         = EXYNOS_USBPHY_ENABLE,
+       .rate_to_clksel         = samsung_usbphy_rate_to_clksel_4x12,
+       .set_isolation          = samsung_usbphy_set_isolation_4210,
+       .phy_enable             = samsung_exynos5_usb3phy_enable,
+       .phy_disable            = samsung_exynos5_usb3phy_disable,
 };
 
 #ifdef CONFIG_OF
index 17d811292f3a9316232569af657993d811173079..cec0855ed24852f3d33b7a38970bf7dc4c13a728 100644 (file)
@@ -1,9 +1,11 @@
 /*
  * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2013 NVIDIA Corporation
  *
  * Author:
  *     Erik Gilling <konkers@google.com>
  *     Benoit Goby <benoit@android.com>
+ *     Venu Byravarasu <vbyravarasu@nvidia.com>
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -21,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
 #include <asm/mach-types.h>
+#include <linux/usb/ehci_def.h>
 #include <linux/usb/tegra_usb_phy.h>
 
-#define TEGRA_USB_BASE         0xC5000000
-#define TEGRA_USB_SIZE         SZ_16K
-
 #define ULPI_VIEWPORT          0x170
 
+/* PORTSC registers */
+#define TEGRA_USB_PORTSC1                              0x184
+#define TEGRA_USB_PORTSC1_PTS(x)                       (((x) & 0x3) << 30)
+#define TEGRA_USB_PORTSC1_PHCD                         (1 << 23)
+
+/* Bits of PORTSC1, which will get cleared by writing 1 into them */
+#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
+
 #define USB_SUSP_CTRL          0x400
 #define   USB_WAKE_ON_CNNT_EN_DEV      (1 << 3)
 #define   USB_WAKE_ON_DISCON_EN_DEV    (1 << 4)
@@ -196,34 +205,41 @@ static struct tegra_utmip_config utmip_default[] = {
        },
 };
 
+static void set_pts(struct tegra_usb_phy *phy, u8 pts_val)
+{
+       void __iomem *base = phy->regs;
+       unsigned long val;
+
+       val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+       val &= ~TEGRA_USB_PORTSC1_PTS(3);
+       val |= TEGRA_USB_PORTSC1_PTS(pts_val & 3);
+       writel(val, base + TEGRA_USB_PORTSC1);
+}
+
+static void set_phcd(struct tegra_usb_phy *phy, bool enable)
+{
+       void __iomem *base = phy->regs;
+       unsigned long val;
+
+       val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+       if (enable)
+               val |= TEGRA_USB_PORTSC1_PHCD;
+       else
+               val &= ~TEGRA_USB_PORTSC1_PHCD;
+       writel(val, base + TEGRA_USB_PORTSC1);
+}
+
 static int utmip_pad_open(struct tegra_usb_phy *phy)
 {
-       phy->pad_clk = clk_get_sys("utmip-pad", NULL);
+       phy->pad_clk = devm_clk_get(phy->dev, "utmi-pads");
        if (IS_ERR(phy->pad_clk)) {
                pr_err("%s: can't get utmip pad clock\n", __func__);
                return PTR_ERR(phy->pad_clk);
        }
 
-       if (phy->is_legacy_phy) {
-               phy->pad_regs = phy->regs;
-       } else {
-               phy->pad_regs = ioremap(TEGRA_USB_BASE, TEGRA_USB_SIZE);
-               if (!phy->pad_regs) {
-                       pr_err("%s: can't remap usb registers\n", __func__);
-                       clk_put(phy->pad_clk);
-                       return -ENOMEM;
-               }
-       }
        return 0;
 }
 
-static void utmip_pad_close(struct tegra_usb_phy *phy)
-{
-       if (!phy->is_legacy_phy)
-               iounmap(phy->pad_regs);
-       clk_put(phy->pad_clk);
-}
-
 static void utmip_pad_power_on(struct tegra_usb_phy *phy)
 {
        unsigned long val, flags;
@@ -299,7 +315,7 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
                val &= ~USB_SUSP_SET;
                writel(val, base + USB_SUSP_CTRL);
        } else
-               phy->set_phcd(&phy->u_phy, true);
+               set_phcd(phy, true);
 
        if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
                pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
@@ -321,7 +337,7 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
                val &= ~USB_SUSP_CLR;
                writel(val, base + USB_SUSP_CTRL);
        } else
-               phy->set_phcd(&phy->u_phy, false);
+               set_phcd(phy, false);
 
        if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
                                                     USB_PHY_CLK_VALID))
@@ -444,7 +460,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
        utmi_phy_clk_enable(phy);
 
        if (!phy->is_legacy_phy)
-               phy->set_pts(&phy->u_phy, 0);
+               set_pts(phy, 0);
 
        return 0;
 }
@@ -541,11 +557,18 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
        int ret;
        unsigned long val;
        void __iomem *base = phy->regs;
-       struct tegra_ulpi_config *config = phy->config;
 
-       gpio_direction_output(config->reset_gpio, 0);
+       ret = gpio_direction_output(phy->reset_gpio, 0);
+       if (ret < 0) {
+               dev_err(phy->dev, "gpio %d not set to 0\n", phy->reset_gpio);
+               return ret;
+       }
        msleep(5);
-       gpio_direction_output(config->reset_gpio, 1);
+       ret = gpio_direction_output(phy->reset_gpio, 1);
+       if (ret < 0) {
+               dev_err(phy->dev, "gpio %d not set to 1\n", phy->reset_gpio);
+               return ret;
+       }
 
        clk_prepare_enable(phy->clk);
        msleep(1);
@@ -603,63 +626,15 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
 
 static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
 {
-       struct tegra_ulpi_config *config = phy->config;
-
        clk_disable(phy->clk);
-       return gpio_direction_output(config->reset_gpio, 0);
-}
-
-static int     tegra_phy_init(struct usb_phy *x)
-{
-       struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
-       struct tegra_ulpi_config *ulpi_config;
-       int err;
-
-       if (phy->is_ulpi_phy) {
-               ulpi_config = phy->config;
-               phy->clk = clk_get_sys(NULL, ulpi_config->clk);
-               if (IS_ERR(phy->clk)) {
-                       pr_err("%s: can't get ulpi clock\n", __func__);
-                       err = -ENXIO;
-                       goto err1;
-               }
-               if (!gpio_is_valid(ulpi_config->reset_gpio))
-                       ulpi_config->reset_gpio =
-                               of_get_named_gpio(phy->dev->of_node,
-                                                 "nvidia,phy-reset-gpio", 0);
-               if (!gpio_is_valid(ulpi_config->reset_gpio)) {
-                       pr_err("%s: invalid reset gpio: %d\n", __func__,
-                              ulpi_config->reset_gpio);
-                       err = -EINVAL;
-                       goto err1;
-               }
-               gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b");
-               gpio_direction_output(ulpi_config->reset_gpio, 0);
-               phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
-               phy->ulpi->io_priv = phy->regs + ULPI_VIEWPORT;
-       } else {
-               err = utmip_pad_open(phy);
-               if (err < 0)
-                       goto err1;
-       }
-       return 0;
-err1:
-       clk_disable_unprepare(phy->pll_u);
-       clk_put(phy->pll_u);
-       return err;
+       return gpio_direction_output(phy->reset_gpio, 0);
 }
 
 static void tegra_usb_phy_close(struct usb_phy *x)
 {
        struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
 
-       if (phy->is_ulpi_phy)
-               clk_put(phy->clk);
-       else
-               utmip_pad_close(phy);
        clk_disable_unprepare(phy->pll_u);
-       clk_put(phy->pll_u);
-       kfree(phy);
 }
 
 static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
@@ -687,54 +662,63 @@ static int        tegra_usb_phy_suspend(struct usb_phy *x, int suspend)
                return tegra_usb_phy_power_on(phy);
 }
 
-struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
-       void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode,
-       void (*set_pts)(struct usb_phy *x, u8 pts_val),
-       void (*set_phcd)(struct usb_phy *x, bool enable))
+static int ulpi_open(struct tegra_usb_phy *phy)
+{
+       int err;
+
+       phy->clk = devm_clk_get(phy->dev, "ulpi-link");
+       if (IS_ERR(phy->clk)) {
+               pr_err("%s: can't get ulpi clock\n", __func__);
+               return PTR_ERR(phy->clk);
+       }
+
+       err = devm_gpio_request(phy->dev, phy->reset_gpio, "ulpi_phy_reset_b");
+       if (err < 0) {
+               dev_err(phy->dev, "request failed for gpio: %d\n",
+                      phy->reset_gpio);
+               return err;
+       }
+
+       err = gpio_direction_output(phy->reset_gpio, 0);
+       if (err < 0) {
+               dev_err(phy->dev, "gpio %d direction not set to output\n",
+                      phy->reset_gpio);
+               return err;
+       }
+
+       phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
+       if (!phy->ulpi) {
+               dev_err(phy->dev, "otg_ulpi_create returned NULL\n");
+               err = -ENOMEM;
+               return err;
+       }
+
+       phy->ulpi->io_priv = phy->regs + ULPI_VIEWPORT;
+       return 0;
+}
 
+static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
 {
-       struct tegra_usb_phy *phy;
        unsigned long parent_rate;
        int i;
        int err;
-       struct device_node *np = dev->of_node;
-
-       phy = kzalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL);
-       if (!phy)
-               return ERR_PTR(-ENOMEM);
-
-       phy->instance = instance;
-       phy->regs = regs;
-       phy->config = config;
-       phy->mode = phy_mode;
-       phy->dev = dev;
-       phy->is_legacy_phy =
-               of_property_read_bool(np, "nvidia,has-legacy-mode");
-       phy->set_pts = set_pts;
-       phy->set_phcd = set_phcd;
-       err = of_property_match_string(np, "phy_type", "ulpi");
-       if (err < 0)
-               phy->is_ulpi_phy = false;
-       else
-               phy->is_ulpi_phy = true;
-
-       if (!phy->config) {
-               if (phy->is_ulpi_phy) {
-                       pr_err("%s: ulpi phy configuration missing", __func__);
-                       err = -EINVAL;
-                       goto err0;
-               } else {
-                       phy->config = &utmip_default[instance];
-               }
+
+       if (!phy->is_ulpi_phy) {
+               if (phy->is_legacy_phy)
+                       phy->config = &utmip_default[0];
+               else
+                       phy->config = &utmip_default[2];
        }
 
-       phy->pll_u = clk_get_sys(NULL, "pll_u");
+       phy->pll_u = devm_clk_get(phy->dev, "pll_u");
        if (IS_ERR(phy->pll_u)) {
                pr_err("Can't get pll_u clock\n");
-               err = PTR_ERR(phy->pll_u);
-               goto err0;
+               return PTR_ERR(phy->pll_u);
        }
-       clk_prepare_enable(phy->pll_u);
+
+       err = clk_prepare_enable(phy->pll_u);
+       if (err)
+               return err;
 
        parent_rate = clk_get_rate(clk_get_parent(phy->pll_u));
        for (i = 0; i < ARRAY_SIZE(tegra_freq_table); i++) {
@@ -746,23 +730,22 @@ struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
        if (!phy->freq) {
                pr_err("invalid pll_u parent rate %ld\n", parent_rate);
                err = -EINVAL;
-               goto err1;
+               goto fail;
        }
 
-       phy->u_phy.init = tegra_phy_init;
-       phy->u_phy.shutdown = tegra_usb_phy_close;
-       phy->u_phy.set_suspend = tegra_usb_phy_suspend;
+       if (phy->is_ulpi_phy)
+               err = ulpi_open(phy);
+       else
+               err = utmip_pad_open(phy);
+       if (err < 0)
+               goto fail;
 
-       return phy;
+       return 0;
 
-err1:
+fail:
        clk_disable_unprepare(phy->pll_u);
-       clk_put(phy->pll_u);
-err0:
-       kfree(phy);
-       return ERR_PTR(err);
+       return err;
 }
-EXPORT_SYMBOL_GPL(tegra_usb_phy_open);
 
 void tegra_usb_phy_preresume(struct usb_phy *x)
 {
@@ -801,3 +784,124 @@ void tegra_ehci_phy_restore_end(struct usb_phy *x)
 }
 EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_end);
 
+static int tegra_usb_phy_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct tegra_usb_phy *tegra_phy = NULL;
+       struct device_node *np = pdev->dev.of_node;
+       int err;
+
+       tegra_phy = devm_kzalloc(&pdev->dev, sizeof(*tegra_phy), GFP_KERNEL);
+       if (!tegra_phy) {
+               dev_err(&pdev->dev, "unable to allocate memory for USB2 PHY\n");
+               return -ENOMEM;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get I/O memory\n");
+               return  -ENXIO;
+       }
+
+       tegra_phy->regs = devm_ioremap(&pdev->dev, res->start,
+               resource_size(res));
+       if (!tegra_phy->regs) {
+               dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+               return -ENOMEM;
+       }
+
+       tegra_phy->is_legacy_phy =
+               of_property_read_bool(np, "nvidia,has-legacy-mode");
+
+       err = of_property_match_string(np, "phy_type", "ulpi");
+       if (err < 0) {
+               tegra_phy->is_ulpi_phy = false;
+
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               if (!res) {
+                       dev_err(&pdev->dev, "Failed to get UTMI Pad regs\n");
+                       return  -ENXIO;
+               }
+
+               tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
+                       resource_size(res));
+               if (!tegra_phy->regs) {
+                       dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
+                       return -ENOMEM;
+               }
+       } else {
+               tegra_phy->is_ulpi_phy = true;
+
+               tegra_phy->reset_gpio =
+                       of_get_named_gpio(np, "nvidia,phy-reset-gpio", 0);
+               if (!gpio_is_valid(tegra_phy->reset_gpio)) {
+                       dev_err(&pdev->dev, "invalid gpio: %d\n",
+                               tegra_phy->reset_gpio);
+                       return tegra_phy->reset_gpio;
+               }
+       }
+
+       err = of_property_match_string(np, "dr_mode", "otg");
+       if (err < 0) {
+               err = of_property_match_string(np, "dr_mode", "peripheral");
+               if (err < 0)
+                       tegra_phy->mode = TEGRA_USB_PHY_MODE_HOST;
+               else
+                       tegra_phy->mode = TEGRA_USB_PHY_MODE_DEVICE;
+       } else
+               tegra_phy->mode = TEGRA_USB_PHY_MODE_OTG;
+
+       tegra_phy->dev = &pdev->dev;
+       err = tegra_usb_phy_init(tegra_phy);
+       if (err < 0)
+               return err;
+
+       tegra_phy->u_phy.shutdown = tegra_usb_phy_close;
+       tegra_phy->u_phy.set_suspend = tegra_usb_phy_suspend;
+
+       dev_set_drvdata(&pdev->dev, tegra_phy);
+       return 0;
+}
+
+static struct of_device_id tegra_usb_phy_id_table[] = {
+       { .compatible = "nvidia,tegra20-usb-phy", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, tegra_usb_phy_id_table);
+
+static struct platform_driver tegra_usb_phy_driver = {
+       .probe          = tegra_usb_phy_probe,
+       .driver         = {
+               .name   = "tegra-phy",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(tegra_usb_phy_id_table),
+       },
+};
+module_platform_driver(tegra_usb_phy_driver);
+
+static int tegra_usb_phy_match(struct device *dev, void *data)
+{
+       struct tegra_usb_phy *tegra_phy = dev_get_drvdata(dev);
+       struct device_node *dn = data;
+
+       return (tegra_phy->dev->of_node == dn) ? 1 : 0;
+}
+
+struct usb_phy *tegra_usb_get_phy(struct device_node *dn)
+{
+       struct device *dev;
+       struct tegra_usb_phy *tegra_phy;
+
+       dev = driver_find_device(&tegra_usb_phy_driver.driver, NULL, dn,
+                                tegra_usb_phy_match);
+       if (!dev)
+               return ERR_PTR(-EPROBE_DEFER);
+
+       tegra_phy = dev_get_drvdata(dev);
+
+       return &tegra_phy->u_phy;
+}
+EXPORT_SYMBOL_GPL(tegra_usb_get_phy);
+
+MODULE_DESCRIPTION("Tegra USB PHY driver");
+MODULE_LICENSE("GPL v2");
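tegra_usb_get_phy() lets a host-controller driver resolve this PHY from a device-tree phandle now that both sides are platform drivers. A sketch of the consumer side, assuming a "nvidia,phy" phandle property (the property name and the wrapper function are illustrative, not taken from this patch); the -EPROBE_DEFER return above is what the caller sees while the PHY has not probed yet:

/* Illustrative sketch of a host-controller consumer. */
static struct usb_phy *example_get_tegra_phy(struct platform_device *pdev)
{
        struct device_node *phy_np;
        struct usb_phy *u_phy;

        phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
        if (!phy_np)
                return ERR_PTR(-ENODEV);

        u_phy = tegra_usb_get_phy(phy_np);
        of_node_put(phy_np);

        return u_phy;   /* may be ERR_PTR(-EPROBE_DEFER); retry probe later */
}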
index c5ba7e5423fc5626701aeb029e6c720d90649c18..7c22a5390fc34dddf8e6f9987da559d32732a406 100644 (file)
@@ -12,6 +12,7 @@
  *
  */
 
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/usb.h>
 #include <linux/io.h>
@@ -78,3 +79,4 @@ struct usb_phy_io_ops ulpi_viewport_access_ops = {
        .read   = ulpi_viewport_read,
        .write  = ulpi_viewport_write,
 };
+EXPORT_SYMBOL_GPL(ulpi_viewport_access_ops);
index 1d55762afbb118bdb2d62fbb960490e02b4479cb..8c3a42ea910cd1d6ef7b503bf4a753f7f521e343 100644 (file)
@@ -710,6 +710,16 @@ config USB_SERIAL_QT2
          To compile this driver as a module, choose M here: the
          module will be called quatech-serial.
 
+config USB_SERIAL_FLASHLOADER
+       tristate "Infineon Modem Flashloader USB interface driver"
+       help
+         Say Y here if you want to download firmware to an Infineon
+         modem via the USB flashloader serial driver.
+
+         To compile this driver as a module, choose M here: the
+         module will be called flashloader.
+
+
 config USB_SERIAL_DEBUG
        tristate "USB Debugging Device"
        help
index cec63fa191046eda1e3b0d095186069f38f01a90..f7130114488f8b95a4ee1c8570c61417cfcdb97b 100644 (file)
@@ -65,3 +65,4 @@ obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL)               += vivopay-serial.o
 obj-$(CONFIG_USB_SERIAL_XSENS_MT)              += xsens_mt.o
 obj-$(CONFIG_USB_SERIAL_ZIO)                   += zio.o
 obj-$(CONFIG_USB_SERIAL_ZTE)                   += zte_ev.o
+obj-$(CONFIG_USB_SERIAL_FLASHLOADER)           += flashloader.o
index 40e7fd94646f4bfbad247c486bc8659dad6d8f0a..bc77e955cbefda0c19f6847553b3897b0d4c6274 100644 (file)
@@ -413,8 +413,8 @@ static int ark3116_ioctl(struct tty_struct *tty,
                /* XXX: Some of these values are probably wrong. */
                memset(&serstruct, 0, sizeof(serstruct));
                serstruct.type = PORT_16654;
-               serstruct.line = port->serial->minor;
-               serstruct.port = port->number;
+               serstruct.line = port->minor;
+               serstruct.port = port->port_number;
                serstruct.custom_divisor = 0;
                serstruct.baud_base = 460800;
 
index 3c4db6d196c630257611883e9f8f7ec46f5c20b6..f053b302a00d543e83fe6562a995ff9960c4e9af 100644 (file)
@@ -43,7 +43,7 @@ static ssize_t show_port_number(struct device *dev,
 {
        struct usb_serial_port *port = to_usb_serial_port(dev);
 
-       return sprintf(buf, "%d\n", port->number - port->serial->minor);
+       return sprintf(buf, "%d\n", port->port_number);
 }
 
 static DEVICE_ATTR(port_number, S_IRUGO, show_port_number, NULL);
@@ -80,7 +80,7 @@ static int usb_serial_device_probe(struct device *dev)
                goto exit_with_autopm;
        }
 
-       minor = port->number;
+       minor = port->minor;
        tty_register_device(usb_serial_tty_driver, minor, dev);
        dev_info(&port->serial->dev->dev,
                 "%s converter now attached to ttyUSB%d\n",
@@ -106,7 +106,7 @@ static int usb_serial_device_remove(struct device *dev)
        /* make sure suspend/resume doesn't race against port_remove */
        usb_autopm_get_interface(port->serial->interface);
 
-       minor = port->number;
+       minor = port->minor;
        tty_unregister_device(usb_serial_tty_driver, minor);
 
        device_remove_file(&port->dev, &dev_attr_port_number);
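The conversions in this and the following serial drivers all follow the same pattern: the per-adapter port index used to be computed as port->number - port->serial->minor, and the tty minor was port->number; the core now provides both values directly as port->port_number and port->minor. A trivial illustration (not part of the patch) of how the two new fields relate:

/* Illustrative only: the new numbering fields. */
static void show_port_numbers(struct usb_serial_port *port)
{
        /* port->minor names the character device, i.e. /dev/ttyUSB<minor>  */
        /* port->port_number is the zero-based index within this adapter    */
        dev_dbg(&port->dev, "ttyUSB%d is port %d of its adapter\n",
                port->minor, port->port_number);
}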
index 5f3bcd31e2045b89a821e1bfe10814d6089534ca..afb50eab2049126fe73bbc13df6880f6324271b9 100644 (file)
@@ -108,18 +108,18 @@ static int usb_console_setup(struct console *co, char *options)
         * no need to check the index here: if the index is wrong, console
         * code won't call us
         */
-       serial = usb_serial_get_by_index(co->index);
-       if (serial == NULL) {
+       port = usb_serial_port_get_by_minor(co->index);
+       if (port == NULL) {
                /* no device is connected yet, sorry :( */
                pr_err("No USB device connected to ttyUSB%i\n", co->index);
                return -ENODEV;
        }
+       serial = port->serial;
 
        retval = usb_autopm_get_interface(serial->interface);
        if (retval)
                goto error_get_interface;
 
-       port = serial->port[co->index - serial->minor];
        tty_port_tty_set(&port->port, NULL);
 
        info->port = port;
@@ -210,7 +210,7 @@ static void usb_console_write(struct console *co,
        if (count == 0)
                return;
 
-       pr_debug("%s - port %d, %d byte(s)\n", __func__, port->number, count);
+       pr_debug("%s - minor %d, %d byte(s)\n", __func__, port->minor, count);
 
        if (!port->port.console) {
                pr_debug("%s - port not opened\n", __func__);
index 2c659553c07c01df5dc1a65be8341809939fc6f4..d6ef2f8da37dca2f7206f26db7d6269bdf0d13e6 100644 (file)
@@ -666,8 +666,6 @@ static void cp210x_set_termios(struct tty_struct *tty,
        unsigned int bits;
        unsigned int modem_ctl[4];
 
-       dev_dbg(dev, "%s - port %d\n", __func__, port->number);
-
        if (!tty)
                return;
 
index 082120198f870b2104d5230a5f1274803bc1074f..e948dc02795d4ede34c7c132d9fb03a1142cf0f5 100644 (file)
@@ -435,7 +435,7 @@ static void cypress_set_dead(struct usb_serial_port *port)
        spin_unlock_irqrestore(&priv->lock, flags);
 
        dev_err(&port->dev, "cypress_m8 suspending failing port %d - "
-               "interval might be too short\n", port->number);
+               "interval might be too short\n", port->port_number);
 }
 
 
@@ -667,7 +667,7 @@ static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
 {
        struct cypress_private *priv = usb_get_serial_port_data(port);
 
-       dev_dbg(&port->dev, "%s - port %d, %d bytes\n", __func__, port->number, count);
+       dev_dbg(&port->dev, "%s - %d bytes\n", __func__, count);
 
        /* line control commands, which need to be executed immediately,
           are not put into the buffer for obvious reasons.
index 7b807d38952783b43854370590e0cc9db628ae49..19b467fe03886ec5a16d1a069982857bf3074507 100644 (file)
@@ -1304,11 +1304,7 @@ static void digi_release(struct usb_serial *serial)
 
 static int digi_port_probe(struct usb_serial_port *port)
 {
-       unsigned port_num;
-
-       port_num = port->number - port->serial->minor;
-
-       return digi_port_init(port, port_num);
+       return digi_port_init(port, port->port_number);
 }
 
 static int digi_port_remove(struct usb_serial_port *port)
index 7d8dd5aad236ee8f52ac761eda06c54c5f9539c3..75e85cbf9e8b38032c8d174963c7b831b515369a 100644 (file)
@@ -288,15 +288,14 @@ static int f81232_ioctl(struct tty_struct *tty,
        struct serial_struct ser;
        struct usb_serial_port *port = tty->driver_data;
 
-       dev_dbg(&port->dev, "%s (%d) cmd = 0x%04x\n", __func__,
-               port->number, cmd);
+       dev_dbg(&port->dev, "%s cmd = 0x%04x\n", __func__, cmd);
 
        switch (cmd) {
        case TIOCGSERIAL:
                memset(&ser, 0, sizeof ser);
                ser.type = PORT_16654;
-               ser.line = port->serial->minor;
-               ser.port = port->number;
+               ser.line = port->minor;
+               ser.port = port->port_number;
                ser.baud_base = 460800;
 
                if (copy_to_user((void __user *)arg, &ser, sizeof ser))
diff --git a/drivers/usb/serial/flashloader.c b/drivers/usb/serial/flashloader.c
new file mode 100644 (file)
index 0000000..e6f5c10
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Infineon Flashloader driver
+ *
+ * Copyright (C) 2013 Wei Shuai <cpuwolf@gmail.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License version
+ *     2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/uaccess.h>
+
+static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x8087, 0x0716) },
+       { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_serial_driver flashloader_device = {
+       .driver = {
+               .owner =        THIS_MODULE,
+               .name =         "flashloader",
+       },
+       .id_table =             id_table,
+       .num_ports =            1,
+};
+
+static struct usb_serial_driver * const serial_drivers[] = {
+       &flashloader_device, NULL
+};
+
+module_usb_serial_driver(serial_drivers, id_table);
+MODULE_LICENSE("GPL");
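The new flashloader driver is about as small as a usb-serial driver gets: one device id, one port, and all tty handling left to the usb-serial core. The module_usb_serial_driver() one-liner above expands to roughly the following boilerplate (shown for illustration; usb_serial_register_drivers() and usb_serial_deregister_drivers() are the real usb-serial core API):

/* Roughly what module_usb_serial_driver(serial_drivers, id_table) generates. */
static int __init flashloader_init(void)
{
        return usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME,
                                           id_table);
}
module_init(flashloader_init);

static void __exit flashloader_exit(void)
{
        usb_serial_deregister_drivers(serial_drivers);
}
module_exit(flashloader_exit);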
index b110c573ea85319490922d8f3c538d507913c060..04b5ed90ffb2ded9739407e081e4dd4f52794e69 100644 (file)
@@ -948,9 +948,9 @@ static void garmin_close(struct usb_serial_port *port)
 {
        struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
 
-       dev_dbg(&port->dev, "%s - port %d - mode=%d state=%d flags=0x%X\n",
-               __func__, port->number, garmin_data_p->mode,
-               garmin_data_p->state, garmin_data_p->flags);
+       dev_dbg(&port->dev, "%s - mode=%d state=%d flags=0x%X\n",
+               __func__, garmin_data_p->mode, garmin_data_p->state,
+               garmin_data_p->flags);
 
        garmin_clear(garmin_data_p);
 
index 1477e8593476deb54a831e3169c00c42e705c50a..dc2803b5eb09c35fa25c75fd376474e42a327b03 100644 (file)
@@ -915,8 +915,8 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
                return -ENOMEM;
        }
 
-       dev_dbg(dev, "%s(%d) - Initialize TX fifo to %d bytes\n",
-               __func__, port->number, edge_port->maxTxCredits);
+       dev_dbg(dev, "%s - Initialize TX fifo to %d bytes\n",
+               __func__, edge_port->maxTxCredits);
 
        return 0;
 }
@@ -1122,9 +1122,8 @@ static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
        copySize = min((unsigned int)count,
                                (edge_port->txCredits - fifo->count));
 
-       dev_dbg(&port->dev, "%s(%d) of %d byte(s) Fifo room  %d -- will copy %d bytes\n",
-               __func__, port->number, count,
-                       edge_port->txCredits - fifo->count, copySize);
+       dev_dbg(&port->dev, "%s of %d byte(s) Fifo room  %d -- will copy %d bytes\n",
+               __func__, count, edge_port->txCredits - fifo->count, copySize);
 
        /* catch writes of 0 bytes which the tty driver likes to give us,
           and when txCredits is empty */
@@ -1216,9 +1215,8 @@ static void send_more_port_data(struct edgeport_serial *edge_serial,
        if (edge_port->write_in_progress ||
            !edge_port->open             ||
            (fifo->count == 0)) {
-               dev_dbg(dev, "%s(%d) EXIT - fifo %d, PendingWrite = %d\n",
-                       __func__, edge_port->port->number,
-                       fifo->count, edge_port->write_in_progress);
+               dev_dbg(dev, "%s EXIT - fifo %d, PendingWrite = %d\n",
+                       __func__, fifo->count, edge_port->write_in_progress);
                goto exit_send;
        }
 
@@ -1230,9 +1228,8 @@ static void send_more_port_data(struct edgeport_serial *edge_serial,
         * it's better to wait for more credits so we can do a larger write.
         */
        if (edge_port->txCredits < EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(edge_port->maxTxCredits, EDGE_FW_BULK_MAX_PACKET_SIZE)) {
-               dev_dbg(dev, "%s(%d) Not enough credit - fifo %d TxCredit %d\n",
-                       __func__, edge_port->port->number, fifo->count,
-                       edge_port->txCredits);
+               dev_dbg(dev, "%s Not enough credit - fifo %d TxCredit %d\n",
+                       __func__, fifo->count, edge_port->txCredits);
                goto exit_send;
        }
 
@@ -1256,10 +1253,8 @@ static void send_more_port_data(struct edgeport_serial *edge_serial,
                edge_port->write_in_progress = false;
                goto exit_send;
        }
-       buffer[0] = IOSP_BUILD_DATA_HDR1(edge_port->port->number
-                               - edge_port->port->serial->minor, count);
-       buffer[1] = IOSP_BUILD_DATA_HDR2(edge_port->port->number
-                               - edge_port->port->serial->minor, count);
+       buffer[0] = IOSP_BUILD_DATA_HDR1(edge_port->port->port_number, count);
+       buffer[1] = IOSP_BUILD_DATA_HDR2(edge_port->port->port_number, count);
 
        /* now copy our data */
        bytesleft =  fifo->size - fifo->tail;
@@ -1377,8 +1372,7 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
                                                edge_port->txfifo.count;
        spin_unlock_irqrestore(&edge_port->ep_lock, flags);
        if (num_chars) {
-               dev_dbg(&port->dev, "%s(port %d) - returns %d\n", __func__,
-                       port->number, num_chars);
+               dev_dbg(&port->dev, "%s - returns %d\n", __func__, num_chars);
        }
 
        return num_chars;
@@ -1575,8 +1569,8 @@ static int get_serial_info(struct edgeport_port *edge_port,
        memset(&tmp, 0, sizeof(tmp));
 
        tmp.type                = PORT_16550A;
-       tmp.line                = edge_port->port->serial->minor;
-       tmp.port                = edge_port->port->number;
+       tmp.line                = edge_port->port->minor;
+       tmp.port                = edge_port->port->port_number;
        tmp.irq                 = 0;
        tmp.flags               = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
        tmp.xmit_fifo_size      = edge_port->maxTxCredits;
@@ -1601,15 +1595,15 @@ static int edge_ioctl(struct tty_struct *tty,
        DEFINE_WAIT(wait);
        struct edgeport_port *edge_port = usb_get_serial_port_data(port);
 
-       dev_dbg(&port->dev, "%s - port %d, cmd = 0x%x\n", __func__, port->number, cmd);
+       dev_dbg(&port->dev, "%s - cmd = 0x%x\n", __func__, cmd);
 
        switch (cmd) {
        case TIOCSERGETLSR:
-               dev_dbg(&port->dev, "%s (%d) TIOCSERGETLSR\n", __func__,  port->number);
+               dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
                return get_lsr_info(edge_port, (unsigned int __user *) arg);
 
        case TIOCGSERIAL:
-               dev_dbg(&port->dev, "%s (%d) TIOCGSERIAL\n", __func__,  port->number);
+               dev_dbg(&port->dev, "%s TIOCGSERIAL\n", __func__);
                return get_serial_info(edge_port, (struct serial_struct __user *) arg);
        }
        return -ENOIOCTLCMD;
@@ -2181,9 +2175,8 @@ static int send_iosp_ext_cmd(struct edgeport_port *edge_port,
 
        currentCommand = buffer;
 
-       MAKE_CMD_EXT_CMD(&currentCommand, &length,
-               edge_port->port->number - edge_port->port->serial->minor,
-               command, param);
+       MAKE_CMD_EXT_CMD(&currentCommand, &length, edge_port->port->port_number,
+                        command, param);
 
        status = write_cmd_usb(edge_port, buffer, length);
        if (status) {
@@ -2266,18 +2259,16 @@ static int send_cmd_write_baud_rate(struct edgeport_port *edge_port,
        int cmdLen = 0;
        int divisor;
        int status;
-       unsigned char number =
-               edge_port->port->number - edge_port->port->serial->minor;
+       u32 number = edge_port->port->port_number;
 
        if (edge_serial->is_epic &&
            !edge_serial->epic_descriptor.Supports.IOSPSetBaudRate) {
-               dev_dbg(dev, "SendCmdWriteBaudRate - NOT Setting baud rate for port = %d, baud = %d\n",
-                       edge_port->port->number, baudRate);
+               dev_dbg(dev, "SendCmdWriteBaudRate - NOT Setting baud rate for port, baud = %d\n",
+                       baudRate);
                return 0;
        }
 
-       dev_dbg(dev, "%s - port = %d, baud = %d\n", __func__,
-               edge_port->port->number, baudRate);
+       dev_dbg(dev, "%s - baud = %d\n", __func__, baudRate);
 
        status = calc_baud_rate_divisor(dev, baudRate, &divisor);
        if (status) {
@@ -2388,9 +2379,8 @@ static int send_cmd_write_uart_register(struct edgeport_port *edge_port,
        currCmd = cmdBuffer;
 
        /* Build a cmd in the buffer to write the given register */
-       MAKE_CMD_WRITE_REG(&currCmd, &cmdLen,
-               edge_port->port->number - edge_port->port->serial->minor,
-               regNum, regValue);
+       MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, edge_port->port->port_number,
+                          regNum, regValue);
 
        status = write_cmd_usb(edge_port, cmdBuffer, cmdLen);
        if (status) {
@@ -2424,8 +2414,6 @@ static void change_port_settings(struct tty_struct *tty,
        __u8 txFlow;
        int status;
 
-       dev_dbg(dev, "%s - port %d\n", __func__, edge_port->port->number);
-
        if (!edge_port->open &&
            !edge_port->openPending) {
                dev_dbg(dev, "%s - port not opened\n", __func__);
index 1be6ba7bee27452ac55c08a06d98ff2394a2f7fb..60054e72b75ba1fc015a7d6fb778b02f104aae9d 100644 (file)
@@ -259,7 +259,7 @@ static int send_cmd(struct usb_device *dev, __u8 command,
 /* clear tx/rx buffers and fifo in TI UMP */
 static int purge_port(struct usb_serial_port *port, __u16 mask)
 {
-       int port_number = port->number - port->serial->minor;
+       int port_number = port->port_number;
 
        dev_dbg(&port->dev, "%s - port %d, mask %x\n", __func__, port_number, mask);
 
@@ -1392,7 +1392,8 @@ stayinbootmode:
 
 static int ti_do_config(struct edgeport_port *port, int feature, int on)
 {
-       int port_number = port->port->number - port->port->serial->minor;
+       int port_number = port->port->port_number;
+
        on = !!on;      /* 1 or 0 not bitmask */
        return send_cmd(port->port->serial->dev,
                        feature, (__u8)(UMPM_UART1_PORT + port_number),
@@ -1637,7 +1638,7 @@ static void edge_bulk_in_callback(struct urb *urb)
                return;
        }
 
-       port_number = edge_port->port->number - edge_port->port->serial->minor;
+       port_number = edge_port->port->port_number;
 
        if (edge_port->lsr_event) {
                edge_port->lsr_event = 0;
@@ -1730,7 +1731,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
        if (edge_port == NULL)
                return -ENODEV;
 
-       port_number = port->number - port->serial->minor;
+       port_number = port->port_number;
        switch (port_number) {
        case 0:
                edge_port->uart_base = UMPMEM_BASE_UART1;
@@ -1908,7 +1909,7 @@ static void edge_close(struct usb_serial_port *port)
        spin_unlock_irqrestore(&edge_port->ep_lock, flags);
 
        dev_dbg(&port->dev, "%s - send umpc_close_port\n", __func__);
-       port_number = port->number - port->serial->minor;
+       port_number = port->port_number;
        send_cmd(serial->dev, UMPC_CLOSE_PORT,
                     (__u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0);
 
@@ -2137,10 +2138,7 @@ static void change_port_settings(struct tty_struct *tty,
        int baud;
        unsigned cflag;
        int status;
-       int port_number = edge_port->port->number -
-                                       edge_port->port->serial->minor;
-
-       dev_dbg(dev, "%s - port %d\n", __func__, edge_port->port->number);
+       int port_number = edge_port->port->port_number;
 
        config = kmalloc (sizeof (*config), GFP_KERNEL);
        if (!config) {
@@ -2284,7 +2282,6 @@ static void edge_set_termios(struct tty_struct *tty,
                tty->termios.c_cflag, tty->termios.c_iflag);
        dev_dbg(&port->dev, "%s - old cflag %08x old iflag %08x\n", __func__,
                old_termios->c_cflag, old_termios->c_iflag);
-       dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
 
        if (edge_port == NULL)
                return;
@@ -2366,8 +2363,8 @@ static int get_serial_info(struct edgeport_port *edge_port,
        memset(&tmp, 0, sizeof(tmp));
 
        tmp.type                = PORT_16550A;
-       tmp.line                = edge_port->port->serial->minor;
-       tmp.port                = edge_port->port->number;
+       tmp.line                = edge_port->port->minor;
+       tmp.port                = edge_port->port->port_number;
        tmp.irq                 = 0;
        tmp.flags               = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
        tmp.xmit_fifo_size      = edge_port->port->bulk_out_size;
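
For reference, the line/port fields filled in above are what userspace sees through the TIOCGSERIAL ioctl handled a few lines further down. A hedged userspace sketch (the device path is an assumption):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>

    int main(void)
    {
            struct serial_struct ss;
            int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);

            /* short-circuit keeps us from ioctl()ing a bad fd */
            if (fd < 0 || ioctl(fd, TIOCGSERIAL, &ss) < 0) {
                    perror("TIOCGSERIAL");
                    return 1;
            }
            printf("type=%d line=%d port=%u\n", ss.type, ss.line, ss.port);
            close(fd);
            return 0;
    }
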
@@ -2386,7 +2383,7 @@ static int edge_ioctl(struct tty_struct *tty,
        struct usb_serial_port *port = tty->driver_data;
        struct edgeport_port *edge_port = usb_get_serial_port_data(port);
 
-       dev_dbg(&port->dev, "%s - port %d, cmd = 0x%x\n", __func__, port->number, cmd);
+       dev_dbg(&port->dev, "%s - cmd = 0x%x\n", __func__, cmd);
 
        switch (cmd) {
        case TIOCGSERIAL:
index 3549d073df229617690f90d29b6c2e1f77648b64..5a979729f8ec4695a1fa4e4395b86a430184e1c9 100644 (file)
@@ -152,7 +152,7 @@ static void keyspan_set_termios(struct tty_struct *tty,
        p_priv = usb_get_serial_port_data(port);
        d_details = p_priv->device_details;
        cflag = tty->termios.c_cflag;
-       device_port = port->number - port->serial->minor;
+       device_port = port->port_number;
 
        /* Baud rate calculation takes baud rate as an integer
           so other rates can be generated if desired. */
@@ -234,8 +234,8 @@ static int keyspan_write(struct tty_struct *tty,
                dataOffset = 1;
        }
 
-       dev_dbg(&port->dev, "%s - for port %d (%d chars), flip=%d\n",
-               __func__, port->number, count, p_priv->out_flip);
+       dev_dbg(&port->dev, "%s - %d chars, flip=%d\n", __func__, count,
+               p_priv->out_flip);
 
        for (left = count; left > 0; left -= todo) {
                todo = left;
@@ -520,12 +520,7 @@ static void        usa28_instat_callback(struct urb *urb)
                goto exit;
        }
 
-       /*
-       dev_dbg(&urb->dev->dev,
-               "%s %x %x %x %x %x %x %x %x %x %x %x %x", __func__,
-               data[0], data[1], data[2], data[3], data[4], data[5],
-               data[6], data[7], data[8], data[9], data[10], data[11]);
-       */
+       /*dev_dbg(&urb->dev->dev, "%s %12ph", __func__, data);*/
 
        /* Now do something useful with the data */
        msg = (struct keyspan_usa28_portStatusMessage *)data;
@@ -607,11 +602,7 @@ static void        usa49_instat_callback(struct urb *urb)
                goto exit;
        }
 
-       /*
-       dev_dbg(&urb->dev->dev, "%s: %x %x %x %x %x %x %x %x %x %x %x",
-               __func__, data[0], data[1], data[2], data[3], data[4],
-               data[5], data[6], data[7], data[8], data[9], data[10]);
-       */
+       /*dev_dbg(&urb->dev->dev, "%s: %11ph", __func__, data);*/
 
        /* Now do something useful with the data */
        msg = (struct keyspan_usa49_portStatusMessage *)data;
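
The two hunks above rely on the %*ph printk extension, which hex-dumps a small buffer (up to 64 bytes) and replaces the long hand-written format strings. A minimal kernel-style sketch with an assumed buffer name:

    #include <linux/printk.h>
    #include <linux/types.h>

    /* prints the first 12 bytes as "xx xx xx ...", like the dev_dbg() above */
    static void demo_dump_status(const u8 *data)
    {
            pr_debug("status: %12ph\n", data);
    }
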
@@ -1050,7 +1041,7 @@ static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
        /* get the terminal config for the setup message now so we don't
         * need to send 2 of them */
 
-       device_port = port->number - port->serial->minor;
+       device_port = port->port_number;
        if (tty) {
                cflag = tty->termios.c_cflag;
                /* Baud rate calculation takes baud rate as an integer
@@ -1556,7 +1547,7 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
        s_priv = usb_get_serial_data(serial);
        p_priv = usb_get_serial_port_data(port);
        d_details = s_priv->device_details;
-       device_port = port->number - port->serial->minor;
+       device_port = port->port_number;
 
        this_urb = p_priv->outcont_urb;
 
@@ -1700,7 +1691,7 @@ static int keyspan_usa28_send_setup(struct usb_serial *serial,
        s_priv = usb_get_serial_data(serial);
        p_priv = usb_get_serial_port_data(port);
        d_details = s_priv->device_details;
-       device_port = port->number - port->serial->minor;
+       device_port = port->port_number;
 
        /* only do something if we have a bulk out endpoint */
        this_urb = p_priv->outcont_urb;
@@ -1830,17 +1821,16 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
        this_urb = s_priv->glocont_urb;
 
        /* Work out which port within the device is being setup */
-       device_port = port->number - port->serial->minor;
+       device_port = port->port_number;
 
        /* Make sure we have an urb then send the message */
        if (this_urb == NULL) {
-               dev_dbg(&port->dev, "%s - oops no urb for port %d.\n", __func__, port->number);
+               dev_dbg(&port->dev, "%s - oops no urb for port.\n", __func__);
                return -1;
        }
 
-       dev_dbg(&port->dev, "%s - endpoint %d port %d (%d)\n",
-               __func__, usb_pipeendpoint(this_urb->pipe),
-               port->number, device_port);
+       dev_dbg(&port->dev, "%s - endpoint %d (%d)\n",
+               __func__, usb_pipeendpoint(this_urb->pipe), device_port);
 
        /* Save reset port val for resend.
           Don't overwrite resend for open/close condition. */
@@ -1855,7 +1845,6 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
 
        memset(&msg, 0, sizeof(struct keyspan_usa49_portControlMessage));
 
-       /*msg.portNumber = port->number;*/
        msg.portNumber = device_port;
 
        /* Only set baud rate if it's changed */
@@ -2145,12 +2134,11 @@ static int keyspan_usa67_send_setup(struct usb_serial *serial,
        this_urb = s_priv->glocont_urb;
 
        /* Work out which port within the device is being setup */
-       device_port = port->number - port->serial->minor;
+       device_port = port->port_number;
 
        /* Make sure we have an urb then send the message */
        if (this_urb == NULL) {
-               dev_dbg(&port->dev, "%s - oops no urb for port %d.\n", __func__,
-                       port->number);
+               dev_dbg(&port->dev, "%s - oops no urb for port.\n", __func__);
                return -1;
        }
 
@@ -2391,7 +2379,7 @@ static int keyspan_port_probe(struct usb_serial_port *port)
        /* Setup values for the various callback routines */
        cback = &keyspan_callbacks[d_details->msg_format];
 
-       port_num = port->number - port->serial->minor;
+       port_num = port->port_number;
 
        /* Do indat endpoints first, once for each flip */
        endp = d_details->indat_endpoints[port_num];
index 47e247759eb0ed975ddcca6482b34080525397d3..40ccf6e5e318bb8be43054d643af4376e4aa5a06 100644 (file)
@@ -224,8 +224,8 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
        result = metrousb_send_unidirectional_cmd(UNI_CMD_OPEN, port);
        if (result) {
                dev_err(&port->dev,
-                       "%s - failed to configure device for port number=%d, error code=%d\n",
-                       __func__, port->number, result);
+                       "%s - failed to configure device, error code=%d\n",
+                       __func__, result);
                goto exit;
        }
 
index f27c621a9297f3c896b84c9ea700d6285ef6b94f..51da424327b0f7c1f64ac90fe482df9e063548ea 100644 (file)
@@ -1047,7 +1047,7 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
          *
          * 0x08 : SP1/2 Control Reg
          */
-       port_number = port->number - port->serial->minor;
+       port_number = port->port_number;
        read_mos_reg(serial, port_number, LSR, &data);
 
        dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data);
@@ -1066,7 +1066,7 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
 
        write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00);
        read_mos_reg(serial, dummy, SP_CONTROL_REG, &data);
-       data = data | (port->number - port->serial->minor + 1);
+       data = data | (port->port_number + 1);
        write_mos_reg(serial, dummy, SP_CONTROL_REG, data);
        mos7720_port->shadowLCR = 0x83;
        write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
@@ -1147,8 +1147,8 @@ static void mos7720_close(struct usb_serial_port *port)
        usb_kill_urb(port->write_urb);
        usb_kill_urb(port->read_urb);
 
-       write_mos_reg(serial, port->number - port->serial->minor, MCR, 0x00);
-       write_mos_reg(serial, port->number - port->serial->minor, IER, 0x00);
+       write_mos_reg(serial, port->port_number, MCR, 0x00);
+       write_mos_reg(serial, port->port_number, IER, 0x00);
 
        mos7720_port->open = 0;
 }
@@ -1172,8 +1172,7 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
                data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
 
        mos7720_port->shadowLCR  = data;
-       write_mos_reg(serial, port->number - port->serial->minor,
-                     LCR, mos7720_port->shadowLCR);
+       write_mos_reg(serial, port->port_number, LCR, mos7720_port->shadowLCR);
 }
 
 /*
@@ -1304,8 +1303,8 @@ static void mos7720_throttle(struct tty_struct *tty)
        /* if we are implementing RTS/CTS, toggle that line */
        if (tty->termios.c_cflag & CRTSCTS) {
                mos7720_port->shadowMCR &= ~UART_MCR_RTS;
-               write_mos_reg(port->serial, port->number - port->serial->minor,
-                             MCR, mos7720_port->shadowMCR);
+               write_mos_reg(port->serial, port->port_number, MCR,
+                             mos7720_port->shadowMCR);
                if (status != 0)
                        return;
        }
@@ -1336,8 +1335,8 @@ static void mos7720_unthrottle(struct tty_struct *tty)
        /* if we are implementing RTS/CTS, toggle that line */
        if (tty->termios.c_cflag & CRTSCTS) {
                mos7720_port->shadowMCR |= UART_MCR_RTS;
-               write_mos_reg(port->serial, port->number - port->serial->minor,
-                             MCR, mos7720_port->shadowMCR);
+               write_mos_reg(port->serial, port->port_number, MCR,
+                             mos7720_port->shadowMCR);
                if (status != 0)
                        return;
        }
@@ -1361,7 +1360,7 @@ static int set_higher_rates(struct moschip_port *mos7720_port,
         *      Init Sequence for higher rates
         ***********************************************/
        dev_dbg(&port->dev, "Sending Setting Commands ..........\n");
-       port_number = port->number - port->serial->minor;
+       port_number = port->port_number;
 
        write_mos_reg(serial, port_number, IER, 0x00);
        write_mos_reg(serial, port_number, FCR, 0x00);
@@ -1487,7 +1486,7 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
        port = mos7720_port->port;
        serial = port->serial;
 
-       number = port->number - port->serial->minor;
+       number = port->port_number;
        dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudrate);
 
        /* Calculate the Divisor */
@@ -1538,7 +1537,7 @@ static void change_port_settings(struct tty_struct *tty,
 
        port = mos7720_port->port;
        serial = port->serial;
-       port_number = port->number - port->serial->minor;
+       port_number = port->port_number;
 
        if (!mos7720_port->open) {
                dev_dbg(&port->dev, "%s - port not opened\n", __func__);
@@ -1731,7 +1730,7 @@ static int get_lsr_info(struct tty_struct *tty,
        struct usb_serial_port *port = tty->driver_data;
        unsigned int result = 0;
        unsigned char data = 0;
-       int port_number = port->number - port->serial->minor;
+       int port_number = port->port_number;
        int count;
 
        count = mos7720_chars_in_buffer(tty);
@@ -1793,8 +1792,8 @@ static int mos7720_tiocmset(struct tty_struct *tty,
                mcr &= ~UART_MCR_LOOP;
 
        mos7720_port->shadowMCR = mcr;
-       write_mos_reg(port->serial, port->number - port->serial->minor,
-                     MCR, mos7720_port->shadowMCR);
+       write_mos_reg(port->serial, port->port_number, MCR,
+                     mos7720_port->shadowMCR);
 
        return 0;
 }
@@ -1838,8 +1837,8 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
        }
 
        mos7720_port->shadowMCR = mcr;
-       write_mos_reg(port->serial, port->number - port->serial->minor,
-                     MCR, mos7720_port->shadowMCR);
+       write_mos_reg(port->serial, port->port_number, MCR,
+                     mos7720_port->shadowMCR);
 
        return 0;
 }
@@ -1855,8 +1854,8 @@ static int get_serial_info(struct moschip_port *mos7720_port,
        memset(&tmp, 0, sizeof(tmp));
 
        tmp.type                = PORT_16550A;
-       tmp.line                = mos7720_port->port->serial->minor;
-       tmp.port                = mos7720_port->port->number;
+       tmp.line                = mos7720_port->port->minor;
+       tmp.port                = mos7720_port->port->port_number;
        tmp.irq                 = 0;
        tmp.flags               = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
        tmp.xmit_fifo_size      = NUM_URBS * URB_TRANSFER_BUFFER_SIZE;
index 7e998081e1cd9b42651143e7f01e63286410393d..0a818b238508e26ab4b1932a844b740ba25c5d3c 100644 (file)
@@ -303,15 +303,12 @@ static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg,
        /* For the UART control registers, the application number need
           to be Or'ed */
        if (port->serial->num_ports == 4) {
-               val |= (((__u16) port->number -
-                               (__u16) (port->serial->minor)) + 1) << 8;
+               val |= ((__u16)port->port_number + 1) << 8;
        } else {
-               if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
-                       val |= (((__u16) port->number -
-                             (__u16) (port->serial->minor)) + 1) << 8;
+               if (port->port_number == 0) {
+                       val |= ((__u16)port->port_number + 1) << 8;
                } else {
-                       val |= (((__u16) port->number -
-                             (__u16) (port->serial->minor)) + 2) << 8;
+                       val |= ((__u16)port->port_number + 2) << 8;
                }
        }
        dev_dbg(&port->dev, "%s application number is %x\n", __func__, val);
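
A standalone illustration of the "application number" the cleaned-up code places in the high byte of wValue (this is an interpretation of the existing arithmetic, not driver code): ports 0..3 of a 4-port device map to 1..4, while the second port of a 2-port device skips to 3.

    #include <stdio.h>

    static unsigned short app_number(int num_ports, int port_number)
    {
            if (num_ports == 4 || port_number == 0)
                    return (unsigned short)(port_number + 1) << 8;
            return (unsigned short)(port_number + 2) << 8;
    }

    int main(void)
    {
            printf("4-port, port 2: 0x%04x\n", app_number(4, 2));  /* 0x0300 */
            printf("2-port, port 1: 0x%04x\n", app_number(2, 1));  /* 0x0300 */
            return 0;
    }
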
@@ -340,16 +337,12 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
 
        /* Wval  is same as application number */
        if (port->serial->num_ports == 4) {
-               Wval =
-                   (((__u16) port->number - (__u16) (port->serial->minor)) +
-                    1) << 8;
+               Wval = ((__u16)port->port_number + 1) << 8;
        } else {
-               if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
-                       Wval = (((__u16) port->number -
-                             (__u16) (port->serial->minor)) + 1) << 8;
+               if (port->port_number == 0) {
+                       Wval = ((__u16)port->port_number + 1) << 8;
                } else {
-                       Wval = (((__u16) port->number -
-                             (__u16) (port->serial->minor)) + 2) << 8;
+                       Wval = ((__u16)port->port_number + 2) << 8;
                }
        }
        dev_dbg(&port->dev, "%s application number is %x\n", __func__, Wval);
@@ -631,9 +624,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
 
        for (i = 0; i < serial->num_ports; i++) {
                mos7840_port = mos7840_get_port_private(serial->port[i]);
-               wval =
-                   (((__u16) serial->port[i]->number -
-                     (__u16) (serial->minor)) + 1) << 8;
+               wval = ((__u16)serial->port[i]->port_number + 1) << 8;
                if (mos7840_port->open) {
                        if (sp[i] & 0x01) {
                                dev_dbg(&urb->dev->dev, "SP%d No Interrupt !!!\n", i);
@@ -1065,8 +1056,8 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
         * (can't set it up in mos7840_startup as the  *
         * structures were not set up at that time.)   */
 
-       dev_dbg(&port->dev, "port number is %d\n", port->number);
-       dev_dbg(&port->dev, "serial number is %d\n", port->serial->minor);
+       dev_dbg(&port->dev, "port number is %d\n", port->port_number);
+       dev_dbg(&port->dev, "minor number is %d\n", port->minor);
        dev_dbg(&port->dev, "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
        dev_dbg(&port->dev, "BulkOut endpoint is %d\n", port->bulk_out_endpointAddress);
        dev_dbg(&port->dev, "Interrupt endpoint is %d\n", port->interrupt_in_endpointAddress);
@@ -1074,9 +1065,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
        mos7840_port->read_urb = port->read_urb;
 
        /* set up our bulk in urb */
-       if ((serial->num_ports == 2)
-               && ((((__u16)port->number -
-                       (__u16)(port->serial->minor)) % 2) != 0)) {
+       if ((serial->num_ports == 2) && (((__u16)port->port_number % 2) != 0)) {
                usb_fill_bulk_urb(mos7840_port->read_urb,
                        serial->dev,
                        usb_rcvbulkpipe(serial->dev,
@@ -1199,7 +1188,7 @@ static void mos7840_close(struct usb_serial_port *port)
        mos7840_port->read_urb_busy = false;
 
        port0->open_ports--;
-       dev_dbg(&port->dev, "%s in close%d:in port%d\n", __func__, port0->open_ports, port->number);
+       dev_dbg(&port->dev, "%s in close%d\n", __func__, port0->open_ports);
        if (port0->open_ports == 0) {
                if (serial->port[0]->interrupt_in_urb) {
                        dev_dbg(&port->dev, "Shutdown interrupt_in_urb\n");
@@ -1435,9 +1424,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
        memcpy(urb->transfer_buffer, current_position, transfer_size);
 
        /* fill urb with data and submit  */
-       if ((serial->num_ports == 2)
-               && ((((__u16)port->number -
-                       (__u16)(port->serial->minor)) % 2) != 0)) {
+       if ((serial->num_ports == 2) && (((__u16)port->port_number % 2) != 0)) {
                usb_fill_bulk_urb(urb,
                        serial->dev,
                        usb_sndbulkpipe(serial->dev,
@@ -1732,10 +1719,9 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
        if (mos7840_serial_paranoia_check(port->serial, __func__))
                return -1;
 
-       number = mos7840_port->port->number - mos7840_port->port->serial->minor;
+       number = mos7840_port->port->port_number;
 
-       dev_dbg(&port->dev, "%s - port = %d, baud = %d\n", __func__,
-               mos7840_port->port->number, baudRate);
+       dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudRate);
        /* reset clk_uart_sel in spregOffset */
        if (baudRate > 115200) {
 #ifdef HW_flow_control
@@ -2016,7 +2002,6 @@ static void mos7840_set_termios(struct tty_struct *tty,
                tty->termios.c_cflag, RELEVANT_IFLAG(tty->termios.c_iflag));
        dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__,
                old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag));
-       dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
 
        /* change the port settings to the new ones specified */
 
@@ -2083,8 +2068,8 @@ static int mos7840_get_serial_info(struct moschip_port *mos7840_port,
        memset(&tmp, 0, sizeof(tmp));
 
        tmp.type = PORT_16550A;
-       tmp.line = mos7840_port->port->serial->minor;
-       tmp.port = mos7840_port->port->number;
+       tmp.line = mos7840_port->port->minor;
+       tmp.port = mos7840_port->port->port_number;
        tmp.irq = 0;
        tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
        tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE;
@@ -2240,7 +2225,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
        /* we set up the pointers to the endpoints in the mos7840_open *
         * function, as the structures aren't created yet.             */
 
-       pnum = port->number - serial->minor;
+       pnum = port->port_number;
 
        dev_dbg(&port->dev, "mos7840_startup: configuring port %d\n", pnum);
        mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
@@ -2261,10 +2246,8 @@ static int mos7840_port_probe(struct usb_serial_port *port)
         * usb-serial.c:get_free_serial() and cannot therefore be used
         * to index device instances */
        mos7840_port->port_num = pnum + 1;
-       dev_dbg(&port->dev, "port->number = %d\n", port->number);
-       dev_dbg(&port->dev, "port->serial->minor = %d\n", port->serial->minor);
+       dev_dbg(&port->dev, "port->minor = %d\n", port->minor);
        dev_dbg(&port->dev, "mos7840_port->port_num = %d\n", mos7840_port->port_num);
-       dev_dbg(&port->dev, "serial->minor = %d\n", serial->minor);
 
        if (mos7840_port->port_num == 1) {
                mos7840_port->SpRegOffset = 0x0;
index 5f4b0cd0f6e9734193dac84ba23373f13e301d65..cbe779f578f9be8c1d5b1f0dfc95c1cc165a1e84 100644 (file)
@@ -348,7 +348,7 @@ static int get_serial_info(struct usb_serial_port *port,
 
        /* fake emulate a 16550 uart to make userspace code happy */
        tmp.type                = PORT_16550A;
-       tmp.line                = port->serial->minor;
+       tmp.line                = port->minor;
        tmp.port                = 0;
        tmp.irq                 = 0;
        tmp.flags               = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
@@ -367,7 +367,7 @@ static int opticon_ioctl(struct tty_struct *tty,
 {
        struct usb_serial_port *port = tty->driver_data;
 
-       dev_dbg(&port->dev, "%s - port %d, cmd = 0x%x\n", __func__, port->number, cmd);
+       dev_dbg(&port->dev, "%s - cmd = 0x%x\n", __func__, cmd);
 
        switch (cmd) {
        case TIOCGSERIAL:
index bd4323ddae1aa19002b92998d52d00b9bd13a7cf..5dd857de05b04f5ecf4cefe7a370e6013db67118 100644 (file)
@@ -159,8 +159,6 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED        0x9000
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED        0x9001
 #define NOVATELWIRELESS_PRODUCT_E362           0x9010
-#define NOVATELWIRELESS_PRODUCT_G1             0xA001
-#define NOVATELWIRELESS_PRODUCT_G1_M           0xA002
 #define NOVATELWIRELESS_PRODUCT_G2             0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551          0xB001
 
@@ -730,8 +728,6 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
        /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
        { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
index 048cd44d51b189352aea40e4275ebb0124e49344..cb6bbed374f26ffd25b69442bc8fcd9356b8889d 100644 (file)
@@ -275,7 +275,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
        u8 control;
        const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
                                 4800, 7200, 9600, 14400, 19200, 28800, 38400,
-                                57600, 115200, 230400, 460800, 614400,
+                                57600, 115200, 230400, 460800, 500000, 614400,
                                 921600, 1228800, 2457600, 3000000, 6000000 };
        int baud_floor, baud_ceil;
        int k;
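
This table is what the rate-selection code (the baud_floor/baud_ceil logic declared above, whose body is outside this hunk) snaps a requested rate to; adding 500000 lets such a request match exactly instead of rounding down to 460800. A hedged, standalone sketch of a nearest-supported-rate pick over a sorted table (not the driver's exact loop):

    #include <stdio.h>

    static int nearest_baud(const int *sup, int n, int baud)
    {
            int floor = sup[0], ceil = sup[n - 1];

            for (int i = 0; i < n; i++) {
                    if (sup[i] <= baud)
                            floor = sup[i];          /* largest rate not above request */
                    if (sup[n - 1 - i] >= baud)
                            ceil = sup[n - 1 - i];   /* smallest rate not below request */
            }
            return (ceil - baud) <= (baud - floor) ? ceil : floor;
    }

    int main(void)
    {
            static const int sup[] = { 9600, 19200, 38400, 57600, 115200, 230400,
                                       460800, 500000, 614400, 921600 };
            printf("480000 -> %d\n", nearest_baud(sup, 10, 480000)); /* 460800 */
            return 0;
    }
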
@@ -301,8 +301,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
        i = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
                            GET_LINE_REQUEST, GET_LINE_REQUEST_TYPE,
                            0, 0, buf, 7, 100);
-       dev_dbg(&port->dev, "0xa1:0x21:0:0  %d - %x %x %x %x %x %x %x\n", i,
-           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+       dev_dbg(&port->dev, "0xa1:0x21:0:0  %d - %7ph\n", i, buf);
 
        if (cflag & CSIZE) {
                switch (cflag & CSIZE) {
@@ -449,8 +448,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
        i = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
                            GET_LINE_REQUEST, GET_LINE_REQUEST_TYPE,
                            0, 0, buf, 7, 100);
-       dev_dbg(&port->dev, "0xa1:0x21:0:0  %d - %x %x %x %x %x %x %x\n", i,
-            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+       dev_dbg(&port->dev, "0xa1:0x21:0:0  %d - %7ph\n", i, buf);
 
        if (cflag & CRTSCTS) {
                if (spriv->type == HX)
@@ -641,8 +639,8 @@ static int pl2303_ioctl(struct tty_struct *tty,
        case TIOCGSERIAL:
                memset(&ser, 0, sizeof ser);
                ser.type = PORT_16654;
-               ser.line = port->serial->minor;
-               ser.port = port->number;
+               ser.line = port->minor;
+               ser.port = port->port_number;
                ser.baud_base = 460800;
 
                if (copy_to_user((void __user *)arg, &ser, sizeof ser))
index bd794b43898cce03ef4ac6fb2c71ecc6563bc4c0..c65437cfd4a276a71ffe1e3bede20306c18fe75f 100644 (file)
@@ -35,7 +35,13 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_G1K(0x04da, 0x250c)},   /* Panasonic Gobi QDL device */
        {DEVICE_G1K(0x413c, 0x8172)},   /* Dell Gobi Modem device */
        {DEVICE_G1K(0x413c, 0x8171)},   /* Dell Gobi QDL device */
-       {DEVICE_G1K(0x1410, 0xa001)},   /* Novatel Gobi Modem device */
+       {DEVICE_G1K(0x1410, 0xa001)},   /* Novatel/Verizon USB-1000 */
+       {DEVICE_G1K(0x1410, 0xa002)},   /* Novatel Gobi Modem device */
+       {DEVICE_G1K(0x1410, 0xa003)},   /* Novatel Gobi Modem device */
+       {DEVICE_G1K(0x1410, 0xa004)},   /* Novatel Gobi Modem device */
+       {DEVICE_G1K(0x1410, 0xa005)},   /* Novatel Gobi Modem device */
+       {DEVICE_G1K(0x1410, 0xa006)},   /* Novatel Gobi Modem device */
+       {DEVICE_G1K(0x1410, 0xa007)},   /* Novatel Gobi Modem device */
        {DEVICE_G1K(0x1410, 0xa008)},   /* Novatel Gobi QDL device */
        {DEVICE_G1K(0x0b05, 0x1776)},   /* Asus Gobi Modem device */
        {DEVICE_G1K(0x0b05, 0x1774)},   /* Asus Gobi QDL device */
index 02b0803425c5d01d82f4b8d498a78c128bdfd268..d99743290fc13c2e81feec43c8bcc5623b4c7c11 100644 (file)
@@ -343,7 +343,7 @@ static int qt2_open(struct tty_struct *tty, struct usb_serial_port *port)
        int status;
        unsigned long flags;
 
-       device_port = (u16) (port->number - port->serial->minor);
+       device_port = port->port_number;
 
        serial = port->serial;
 
@@ -388,9 +388,8 @@ static int qt2_open(struct tty_struct *tty, struct usb_serial_port *port)
        status = qt2_set_port_config(serial->dev, device_port,
                                     DEFAULT_BAUD_RATE, UART_LCR_WLEN8);
        if (status < 0) {
-               dev_err(&port->dev,
-                       "%s - initial setup failed for port %i (%i)\n",
-                       __func__, port->number, device_port);
+               dev_err(&port->dev, "%s - initial setup failed (%i)\n",
+                       __func__, device_port);
                return status;
        }
 
@@ -466,7 +465,7 @@ static int get_serial_info(struct usb_serial_port *port,
                return -EFAULT;
 
        memset(&tmp, 0, sizeof(tmp));
-       tmp.line                = port->serial->minor;
+       tmp.line                = port->minor;
        tmp.port                = 0;
        tmp.irq                 = 0;
        tmp.flags               = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
@@ -523,7 +522,7 @@ static void qt2_process_flush(struct usb_serial_port *port, unsigned char *ch)
        return;
 }
 
-void qt2_process_read_urb(struct urb *urb)
+static void qt2_process_read_urb(struct urb *urb)
 {
        struct usb_serial *serial;
        struct qt2_serial_private *serial_priv;
index 8894665cd6102cfe756ea2a86f46176010483444..de958c5b52e33ea5b7c3ff23efe42d33322bdeb4 100644 (file)
@@ -914,7 +914,7 @@ static int sierra_port_probe(struct usb_serial_port *port)
                /* This is really the usb-serial port number of the interface
                 * rather than the interface number.
                 */
-               ifnum = port->number - serial->minor;
+               ifnum = port->port_number;
                himemoryp = &typeA_interface_list;
        }
 
index 5b62dbbdf996b53bcb4f82ea6966dae7c2fa5801..e5750be49054397ecb782066a913ae7a05e68c5a 100644 (file)
@@ -323,7 +323,7 @@ static int get_serial_info(struct usb_serial_port *port,
                return -EFAULT;
 
        memset(&tmp, 0, sizeof(tmp));
-       tmp.line                = port->serial->minor;
+       tmp.line                = port->minor;
        tmp.port                = 0;
        tmp.irq                 = 0;
        tmp.flags               = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
index c92c5ed4e580ec6761d0acabc379cfe667faaeb1..7182bb774b7958f70f50efb823b084c5ea9284d5 100644 (file)
@@ -172,7 +172,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
        { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
-       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
+       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
        { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
 };
 
@@ -476,7 +477,7 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
        if (mutex_lock_interruptible(&tdev->td_open_close_lock))
                return -ERESTARTSYS;
 
-       port_number = port->number - port->serial->minor;
+       port_number = port->port_number;
 
        tport->tp_msr = 0;
        tport->tp_shadow_mcr |= (TI_MCR_RTS | TI_MCR_DTR);
@@ -618,7 +619,7 @@ static void ti_close(struct usb_serial_port *port)
        kfifo_reset_out(&tport->write_fifo);
        spin_unlock_irqrestore(&tport->tp_lock, flags);
 
-       port_number = port->number - port->serial->minor;
+       port_number = port->port_number;
 
        dev_dbg(&port->dev, "%s - sending TI_CLOSE_PORT\n", __func__);
        status = ti_command_out_sync(tdev, TI_CLOSE_PORT,
@@ -776,7 +777,7 @@ static void ti_set_termios(struct tty_struct *tty,
        tcflag_t cflag, iflag;
        int baud;
        int status;
-       int port_number = port->number - port->serial->minor;
+       int port_number = port->port_number;
        unsigned int mcr;
 
        cflag = tty->termios.c_cflag;
@@ -1262,7 +1263,7 @@ static int ti_get_lsr(struct ti_port *tport, u8 *lsr)
        int size, status;
        struct ti_device *tdev = tport->tp_tdev;
        struct usb_serial_port *port = tport->tp_port;
-       int port_number = port->number - port->serial->minor;
+       int port_number = port->port_number;
        struct ti_port_status *data;
 
        size = sizeof(struct ti_port_status);
@@ -1308,8 +1309,8 @@ static int ti_get_serial_info(struct ti_port *tport,
        memset(&ret_serial, 0, sizeof(ret_serial));
 
        ret_serial.type = PORT_16550A;
-       ret_serial.line = port->serial->minor;
-       ret_serial.port = port->number - port->serial->minor;
+       ret_serial.line = port->minor;
+       ret_serial.port = port->port_number;
        ret_serial.flags = tport->tp_flags;
        ret_serial.xmit_fifo_size = TI_WRITE_BUF_SIZE;
        ret_serial.baud_base = tport->tp_tdev->td_is_3410 ? 921600 : 460800;
index b353e7e3d4809d17478478eae47fa2cc75a0d5a6..4a2423e84d55820b594ca43ed99fc8ae88b2f002 100644 (file)
@@ -52,7 +52,9 @@
 
 /* Abbott Diabetics vendor and product ids */
 #define ABBOTT_VENDOR_ID               0x1a61
-#define ABBOTT_PRODUCT_ID              0x3410
+#define ABBOTT_STEREO_PLUG_ID          0x3410
+#define ABBOTT_PRODUCT_ID              ABBOTT_STEREO_PLUG_ID
+#define ABBOTT_STRIP_PORT_ID           0x3420
 
 /* Commands */
 #define TI_GET_VERSION                 0x01
index 5f6b1ff9d29e6c4166213d04263b83aca0a51a52..cb27fcb2fc905fddb23e4ef7021ed2a20377132e 100644 (file)
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
 #include <linux/kfifo.h>
+#include <linux/idr.h>
 #include "pl2303.h"
 
 #define DRIVER_AUTHOR "Greg Kroah-Hartman <gregkh@linuxfoundation.org>"
 #define DRIVER_DESC "USB Serial Driver core"
 
+#define USB_SERIAL_TTY_MAJOR   188
+#define USB_SERIAL_TTY_MINORS  512     /* should be enough for a while */
+
 /* There is no MODULE_DEVICE_TABLE for usbserial.c.  Instead
    the MODULE_DEVICE_TABLE declarations in each serial driver
    cause the "hotplug" program to pull in whatever module is necessary
    drivers depend on it.
 */
 
-static struct usb_serial *serial_table[SERIAL_TTY_MINORS];
+static DEFINE_IDR(serial_minors);
 static DEFINE_MUTEX(table_lock);
 static LIST_HEAD(usb_serial_driver_list);
 
 /*
- * Look up the serial structure.  If it is found and it hasn't been
- * disconnected, return with its disc_mutex held and its refcount
- * incremented.  Otherwise return NULL.
+ * Look up the serial port structure.  If it is found and it hasn't been
+ * disconnected, return with the parent usb_serial structure's disc_mutex held
+ * and its refcount incremented.  Otherwise return NULL.
  */
-struct usb_serial *usb_serial_get_by_index(unsigned index)
+struct usb_serial_port *usb_serial_port_get_by_minor(unsigned minor)
 {
        struct usb_serial *serial;
+       struct usb_serial_port *port;
 
        mutex_lock(&table_lock);
-       serial = serial_table[index];
-
-       if (serial) {
-               mutex_lock(&serial->disc_mutex);
-               if (serial->disconnected) {
-                       mutex_unlock(&serial->disc_mutex);
-                       serial = NULL;
-               } else {
-                       kref_get(&serial->kref);
-               }
+       port = idr_find(&serial_minors, minor);
+       if (!port)
+               goto exit;
+
+       serial = port->serial;
+       mutex_lock(&serial->disc_mutex);
+       if (serial->disconnected) {
+               mutex_unlock(&serial->disc_mutex);
+               port = NULL;
+       } else {
+               kref_get(&serial->kref);
        }
+exit:
        mutex_unlock(&table_lock);
-       return serial;
+       return port;
 }
 
-static struct usb_serial *get_free_serial(struct usb_serial *serial,
-                                       int num_ports, unsigned int *minor)
+static int allocate_minors(struct usb_serial *serial, int num_ports)
 {
+       struct usb_serial_port *port;
        unsigned int i, j;
-       int good_spot;
+       int minor;
 
        dev_dbg(&serial->interface->dev, "%s %d\n", __func__, num_ports);
 
-       *minor = 0;
        mutex_lock(&table_lock);
-       for (i = 0; i < SERIAL_TTY_MINORS; ++i) {
-               if (serial_table[i])
-                       continue;
-
-               good_spot = 1;
-               for (j = 1; j <= num_ports-1; ++j)
-                       if ((i+j >= SERIAL_TTY_MINORS) || (serial_table[i+j])) {
-                               good_spot = 0;
-                               i += j;
-                               break;
-                       }
-               if (good_spot == 0)
-                       continue;
-
-               *minor = i;
-               j = 0;
-               dev_dbg(&serial->interface->dev, "%s - minor base = %d\n", __func__, *minor);
-               for (i = *minor; (i < (*minor + num_ports)) && (i < SERIAL_TTY_MINORS); ++i) {
-                       serial_table[i] = serial;
-                       serial->port[j++]->number = i;
-               }
-               mutex_unlock(&table_lock);
-               return serial;
+       for (i = 0; i < num_ports; ++i) {
+               port = serial->port[i];
+               minor = idr_alloc(&serial_minors, port, 0, 0, GFP_KERNEL);
+               if (minor < 0)
+                       goto error;
+               port->minor = minor;
+               port->port_number = i;
        }
+       serial->minors_reserved = 1;
        mutex_unlock(&table_lock);
-       return NULL;
+       return 0;
+error:
+       /* unwind the already allocated minors */
+       for (j = 0; j < i; ++j)
+               idr_remove(&serial_minors, serial->port[j]->minor);
+       mutex_unlock(&table_lock);
+       return minor;
 }
 
-static void return_serial(struct usb_serial *serial)
+static void release_minors(struct usb_serial *serial)
 {
        int i;
 
        mutex_lock(&table_lock);
        for (i = 0; i < serial->num_ports; ++i)
-               serial_table[serial->minor + i] = NULL;
+               idr_remove(&serial_minors, serial->port[i]->minor);
        mutex_unlock(&table_lock);
+       serial->minors_reserved = 0;
 }
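
With the fixed-size serial_table[] gone, minors now come from an IDR, which hands out the lowest free id and maps it back to the owning port. A hedged, self-contained module sketch of that allocate/lookup/release pattern (dummy payload and names; idr_destroy() on unload omitted for brevity):

    #include <linux/idr.h>
    #include <linux/module.h>

    static DEFINE_IDR(demo_idr);

    static int __init idr_demo_init(void)
    {
            static int payload = 42;
            /* lowest free id >= 0, no upper bound, pointer stored with it */
            int id = idr_alloc(&demo_idr, &payload, 0, 0, GFP_KERNEL);

            if (id < 0)
                    return id;
            pr_info("allocated id %d, lookup %p\n", id, idr_find(&demo_idr, id));
            idr_remove(&demo_idr, id);
            return 0;
    }
    module_init(idr_demo_init);
    MODULE_LICENSE("GPL");
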
 
 static void destroy_serial(struct kref *kref)
@@ -135,8 +133,8 @@ static void destroy_serial(struct kref *kref)
        serial = to_usb_serial(kref);
 
        /* return the minor range that this device had */
-       if (serial->minor != SERIAL_TTY_NO_MINOR)
-               return_serial(serial);
+       if (serial->minors_reserved)
+               release_minors(serial);
 
        if (serial->attached && serial->type->release)
                serial->type->release(serial);
@@ -185,13 +183,11 @@ static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
        struct usb_serial_port *port;
        int retval = -ENODEV;
 
-       serial = usb_serial_get_by_index(idx);
-       if (!serial)
+       port = usb_serial_port_get_by_minor(idx);
+       if (!port)
                return retval;
 
-       port = serial->port[idx - serial->minor];
-       if (!port)
-               goto error_no_port;
+       serial = port->serial;
        if (!try_module_get(serial->type->driver.owner))
                goto error_module_get;
 
@@ -218,7 +214,6 @@ static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
  error_get_interface:
        module_put(serial->type->driver.owner);
  error_module_get:
- error_no_port:
        usb_serial_put(serial);
        mutex_unlock(&serial->disc_mutex);
        return retval;
@@ -452,14 +447,16 @@ static int serial_break(struct tty_struct *tty, int break_state)
 static int serial_proc_show(struct seq_file *m, void *v)
 {
        struct usb_serial *serial;
+       struct usb_serial_port *port;
        int i;
        char tmp[40];
 
        seq_puts(m, "usbserinfo:1.0 driver:2.0\n");
-       for (i = 0; i < SERIAL_TTY_MINORS; ++i) {
-               serial = usb_serial_get_by_index(i);
-               if (serial == NULL)
+       for (i = 0; i < USB_SERIAL_TTY_MINORS; ++i) {
+               port = usb_serial_port_get_by_minor(i);
+               if (port == NULL)
                        continue;
+               serial = port->serial;
 
                seq_printf(m, "%d:", i);
                if (serial->type->driver.owner)
@@ -471,7 +468,7 @@ static int serial_proc_show(struct seq_file *m, void *v)
                        le16_to_cpu(serial->dev->descriptor.idVendor),
                        le16_to_cpu(serial->dev->descriptor.idProduct));
                seq_printf(m, " num_ports:%d", serial->num_ports);
-               seq_printf(m, " port:%d", i - serial->minor + 1);
+               seq_printf(m, " port:%d", port->port_number);
                usb_make_path(serial->dev, tmp, sizeof(tmp));
                seq_printf(m, " path:%s", tmp);
 
@@ -613,7 +610,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
        serial->interface = usb_get_intf(interface);
        kref_init(&serial->kref);
        mutex_init(&serial->disc_mutex);
-       serial->minor = SERIAL_TTY_NO_MINOR;
+       serial->minors_reserved = 0;
 
        return serial;
 }
@@ -722,7 +719,6 @@ static int usb_serial_probe(struct usb_interface *interface,
        struct usb_endpoint_descriptor *bulk_out_endpoint[MAX_NUM_PORTS];
        struct usb_serial_driver *type = NULL;
        int retval;
-       unsigned int minor;
        int buffer_size;
        int i;
        int j;
@@ -1039,16 +1035,15 @@ static int usb_serial_probe(struct usb_interface *interface,
         */
        serial->disconnected = 1;
 
-       if (get_free_serial(serial, num_ports, &minor) == NULL) {
-               dev_err(ddev, "No more free serial devices\n");
+       if (allocate_minors(serial, num_ports)) {
+               dev_err(ddev, "No more free serial minor numbers\n");
                goto probe_error;
        }
-       serial->minor = minor;
 
        /* register all of the individual ports with the driver core */
        for (i = 0; i < num_ports; ++i) {
                port = serial->port[i];
-               dev_set_name(&port->dev, "ttyUSB%d", port->number);
+               dev_set_name(&port->dev, "ttyUSB%d", port->minor);
                dev_dbg(ddev, "registering %s", dev_name(&port->dev));
                device_enable_async_suspend(&port->dev);
 
@@ -1059,7 +1054,7 @@ static int usb_serial_probe(struct usb_interface *interface,
 
        serial->disconnected = 0;
 
-       usb_serial_console_init(minor);
+       usb_serial_console_init(serial->port[0]->minor);
 exit:
        module_put(type->driver.owner);
        return 0;
@@ -1223,17 +1218,13 @@ static struct usb_driver usb_serial_driver = {
 
 static int __init usb_serial_init(void)
 {
-       int i;
        int result;
 
-       usb_serial_tty_driver = alloc_tty_driver(SERIAL_TTY_MINORS);
+       usb_serial_tty_driver = alloc_tty_driver(USB_SERIAL_TTY_MINORS);
        if (!usb_serial_tty_driver)
                return -ENOMEM;
 
        /* Initialize our global data */
-       for (i = 0; i < SERIAL_TTY_MINORS; ++i)
-               serial_table[i] = NULL;
-
        result = bus_register(&usb_serial_bus_type);
        if (result) {
                pr_err("%s - registering bus driver failed\n", __func__);
@@ -1242,7 +1233,7 @@ static int __init usb_serial_init(void)
 
        usb_serial_tty_driver->driver_name = "usbserial";
        usb_serial_tty_driver->name = "ttyUSB";
-       usb_serial_tty_driver->major = SERIAL_TTY_MAJOR;
+       usb_serial_tty_driver->major = USB_SERIAL_TTY_MAJOR;
        usb_serial_tty_driver->minor_start = 0;
        usb_serial_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
        usb_serial_tty_driver->subtype = SERIAL_TYPE_NORMAL;
index ece326ef63a0b64c831e5f541b7171b95a9761f6..8257d30c40720f348de427ef4d04ea3d1835d03a 100644 (file)
@@ -124,8 +124,8 @@ static int get_serial_info(struct usb_serial_port *port,
                return -EFAULT;
 
        memset(&tmp, 0, sizeof(tmp));
-       tmp.line            = port->serial->minor;
-       tmp.port            = port->number;
+       tmp.line            = port->minor;
+       tmp.port            = port->port_number;
        tmp.baud_base       = tty_get_baud_rate(port->port.tty);
        tmp.close_delay     = port->port.close_delay / 10;
        tmp.closing_wait    = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
index 347caad47a121d3f7a26ec7f3246a6960114ffc2..36a7740e827c997ebbc156bfcb6e908cb02d1804 100644 (file)
@@ -461,8 +461,8 @@ static int whiteheat_ioctl(struct tty_struct *tty,
        case TIOCGSERIAL:
                memset(&serstruct, 0, sizeof(serstruct));
                serstruct.type = PORT_16654;
-               serstruct.line = port->serial->minor;
-               serstruct.port = port->number;
+               serstruct.line = port->minor;
+               serstruct.port = port->port_number;
                serstruct.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
                serstruct.xmit_fifo_size = kfifo_size(&port->write_fifo);
                serstruct.custom_divisor = 0;
@@ -626,7 +626,7 @@ static int firm_open(struct usb_serial_port *port)
 {
        struct whiteheat_simple open_command;
 
-       open_command.port = port->number - port->serial->minor + 1;
+       open_command.port = port->port_number + 1;
        return firm_send_command(port, WHITEHEAT_OPEN,
                (__u8 *)&open_command, sizeof(open_command));
 }
@@ -636,7 +636,7 @@ static int firm_close(struct usb_serial_port *port)
 {
        struct whiteheat_simple close_command;
 
-       close_command.port = port->number - port->serial->minor + 1;
+       close_command.port = port->port_number + 1;
        return firm_send_command(port, WHITEHEAT_CLOSE,
                        (__u8 *)&close_command, sizeof(close_command));
 }
@@ -649,7 +649,7 @@ static void firm_setup_port(struct tty_struct *tty)
        struct whiteheat_port_settings port_settings;
        unsigned int cflag = tty->termios.c_cflag;
 
-       port_settings.port = port->number - port->serial->minor + 1;
+       port_settings.port = port->port_number + 1;
 
        /* get the byte size */
        switch (cflag & CSIZE) {
@@ -726,7 +726,7 @@ static int firm_set_rts(struct usb_serial_port *port, __u8 onoff)
 {
        struct whiteheat_set_rdb rts_command;
 
-       rts_command.port = port->number - port->serial->minor + 1;
+       rts_command.port = port->port_number + 1;
        rts_command.state = onoff;
        return firm_send_command(port, WHITEHEAT_SET_RTS,
                        (__u8 *)&rts_command, sizeof(rts_command));
@@ -737,7 +737,7 @@ static int firm_set_dtr(struct usb_serial_port *port, __u8 onoff)
 {
        struct whiteheat_set_rdb dtr_command;
 
-       dtr_command.port = port->number - port->serial->minor + 1;
+       dtr_command.port = port->port_number + 1;
        dtr_command.state = onoff;
        return firm_send_command(port, WHITEHEAT_SET_DTR,
                        (__u8 *)&dtr_command, sizeof(dtr_command));
@@ -748,7 +748,7 @@ static int firm_set_break(struct usb_serial_port *port, __u8 onoff)
 {
        struct whiteheat_set_rdb break_command;
 
-       break_command.port = port->number - port->serial->minor + 1;
+       break_command.port = port->port_number + 1;
        break_command.state = onoff;
        return firm_send_command(port, WHITEHEAT_SET_BREAK,
                        (__u8 *)&break_command, sizeof(break_command));
@@ -759,7 +759,7 @@ static int firm_purge(struct usb_serial_port *port, __u8 rxtx)
 {
        struct whiteheat_purge purge_command;
 
-       purge_command.port = port->number - port->serial->minor + 1;
+       purge_command.port = port->port_number + 1;
        purge_command.what = rxtx;
        return firm_send_command(port, WHITEHEAT_PURGE,
                        (__u8 *)&purge_command, sizeof(purge_command));
@@ -770,7 +770,7 @@ static int firm_get_dtr_rts(struct usb_serial_port *port)
 {
        struct whiteheat_simple get_dr_command;
 
-       get_dr_command.port = port->number - port->serial->minor + 1;
+       get_dr_command.port = port->port_number + 1;
        return firm_send_command(port, WHITEHEAT_GET_DTR_RTS,
                        (__u8 *)&get_dr_command, sizeof(get_dr_command));
 }
@@ -780,7 +780,7 @@ static int firm_report_tx_done(struct usb_serial_port *port)
 {
        struct whiteheat_simple close_command;
 
-       close_command.port = port->number - port->serial->minor + 1;
+       close_command.port = port->port_number + 1;
        return firm_send_command(port, WHITEHEAT_REPORT_TX_DONE,
                        (__u8 *)&close_command, sizeof(close_command));
 }
index 77a2ddfe64875ce83644db2a462ec17465e3ea2a..6636a583da126bff3138d5a9d3a711224e5974d6 100644 (file)
@@ -249,11 +249,7 @@ static void nand_init_ecc(void) {
 /* compute 3-byte ecc on 256 bytes */
 static void nand_compute_ecc(unsigned char *data, unsigned char *ecc) {
        int i, j, a;
-       unsigned char par, bit, bits[8];
-
-       par = 0;
-       for (j = 0; j < 8; j++)
-               bits[j] = 0;
+       unsigned char par = 0, bit, bits[8] = {0};
 
        /* collect 16 checksum bits */
        for (i = 0; i < 256; i++) {
index 732027f3320020ba88d6f3bc1a9c65accf717c39..073a2c32ccc4eab336dd064e3bfb51406519698f 100644 (file)
@@ -219,11 +219,7 @@ static void nand_init_ecc(void) {
 /* compute 3-byte ecc on 256 bytes */
 static void nand_compute_ecc(unsigned char *data, unsigned char *ecc) {
        int i, j, a;
-       unsigned char par, bit, bits[8];
-
-       par = 0;
-       for (j = 0; j < 8; j++)
-               bits[j] = 0;
+       unsigned char par = 0, bit, bits[8] = {0};
 
        /* collect 16 checksum bits */
        for (i = 0; i < 256; i++) {
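
Both hunks above replace an explicit clearing loop with an initialiser; in C, "= {0}" value-initialises every element of the array, so the loop was redundant. A standalone check:

    #include <stdio.h>

    int main(void)
    {
            unsigned char bits[8] = {0};    /* all eight elements become 0 */

            for (int j = 0; j < 8; j++)
                    printf("%u ", bits[j]);
            printf("\n");
            return 0;
    }
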
index 0db0a919d72b2a0a7460f2e6fc7aeee639aebf5c..675384dabfe987f59d2e8678875e4801ceda488b 100644 (file)
@@ -13,7 +13,9 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/usb/ch9.h>
+#include <linux/usb/of.h>
 #include <linux/usb/otg.h>
 
 const char *usb_otg_state_string(enum usb_otg_state state)
@@ -79,4 +81,37 @@ const char *usb_state_string(enum usb_device_state state)
 }
 EXPORT_SYMBOL_GPL(usb_state_string);
 
+#ifdef CONFIG_OF
+static const char *const usb_dr_modes[] = {
+       [USB_DR_MODE_UNKNOWN]           = "",
+       [USB_DR_MODE_HOST]              = "host",
+       [USB_DR_MODE_PERIPHERAL]        = "peripheral",
+       [USB_DR_MODE_OTG]               = "otg",
+};
+
+/**
+ * of_usb_get_dr_mode - Get dual role mode for given device_node
+ * @np:        Pointer to the given device_node
+ *
+ * The function reads the dual-role mode string from the 'dr_mode'
+ * property and returns the corresponding enum usb_dr_mode.
+ */
+enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
+{
+       const char *dr_mode;
+       int err, i;
+
+       err = of_property_read_string(np, "dr_mode", &dr_mode);
+       if (err < 0)
+               return USB_DR_MODE_UNKNOWN;
+
+       for (i = 0; i < ARRAY_SIZE(usb_dr_modes); i++)
+               if (!strcmp(dr_mode, usb_dr_modes[i]))
+                       return i;
+
+       return USB_DR_MODE_UNKNOWN;
+}
+EXPORT_SYMBOL_GPL(of_usb_get_dr_mode);
+#endif
+
 MODULE_LICENSE("GPL");
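
A hedged sketch of a caller: a dual-role controller driver would read the firmware-provided mode during probe and pick a fallback when the property is absent. The driver name, the OTG fallback, and the of_node source are assumptions, not part of this patch.

    #include <linux/platform_device.h>
    #include <linux/usb/of.h>
    #include <linux/usb/otg.h>

    static int demo_udc_probe(struct platform_device *pdev)
    {
            enum usb_dr_mode mode = of_usb_get_dr_mode(pdev->dev.of_node);

            if (mode == USB_DR_MODE_UNKNOWN)
                    mode = USB_DR_MODE_OTG;     /* assumed default */
            dev_info(&pdev->dev, "dr_mode: %d\n", mode);
            return 0;
    }
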
index 1d365316960cdad1a0a020d6d2757a0e5f73f85e..33a12788f9ca063c8aae13ce28d9b978f27a372c 100644 (file)
@@ -455,8 +455,8 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
                        dev_err(dev, "KEEPALIVE: device %u timed out\n",
                                wusb_dev->addr);
                        __wusbhc_dev_disconnect(wusbhc, wusb_port);
-               } else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) {
-                       /* Approaching timeout cut out, need to refresh */
+               } else if (time_after(jiffies, wusb_dev->entry_ts + tt/3)) {
+                       /* Approaching timeout cut off, need to refresh */
                        ie->bDeviceAddress[keep_alives++] = wusb_dev->addr;
                }
        }
@@ -1062,7 +1062,7 @@ int wusbhc_devconnect_start(struct wusbhc *wusbhc)
        wusbhc->wuie_host_info = hi;
 
        queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
-                          (wusbhc->trust_timeout*CONFIG_HZ)/1000/2);
+                          msecs_to_jiffies(wusbhc->trust_timeout / 2));
 
        return 0;
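
The old expression open-coded the jiffies conversion with CONFIG_HZ; msecs_to_jiffies() performs the same millisecond-to-jiffies conversion correctly for any configured HZ. A minimal kernel-style sketch of arming a delayed work this way (work item and timeout are illustrative only):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void demo_fn(struct work_struct *work) { }
    static DECLARE_DELAYED_WORK(demo_work, demo_fn);

    static void demo_arm(unsigned int timeout_ms)
    {
            /* re-arm at half the timeout, as the keep-alive code above does */
            schedule_delayed_work(&demo_work, msecs_to_jiffies(timeout_ms / 2));
    }
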
 
index b8c72583c0405f6d2cac23ea5884b14bc2427672..b71760c8d3ad13b6697f4b39efc299364c2408b1 100644 (file)
@@ -195,6 +195,7 @@ int wusbhc_start(struct wusbhc *wusbhc)
        struct device *dev = wusbhc->dev;
 
        WARN_ON(wusbhc->wuie_host_info != NULL);
+       BUG_ON(wusbhc->uwb_rc == NULL);
 
        result = wusbhc_rsv_establish(wusbhc);
        if (result < 0) {
@@ -214,9 +215,9 @@ int wusbhc_start(struct wusbhc *wusbhc)
                dev_err(dev, "error starting security in the HC: %d\n", result);
                goto error_sec_start;
        }
-       /* FIXME: the choice of the DNTS parameters is somewhat
-        * arbitrary */
-       result = wusbhc->set_num_dnts(wusbhc, 0, 15);
+
+       result = wusbhc->set_num_dnts(wusbhc, wusbhc->dnts_interval,
+               wusbhc->dnts_num_slots);
        if (result < 0) {
                dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
                goto error_set_num_dnts;
@@ -276,12 +277,38 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
                }
                wusbhc->chid = *chid;
        }
+
+       /* register with UWB if we haven't already since we are about to start
+           the radio. */
+       if ((chid) && (wusbhc->uwb_rc == NULL)) {
+               wusbhc->uwb_rc = uwb_rc_get_by_grandpa(wusbhc->dev->parent);
+               if (wusbhc->uwb_rc == NULL) {
+                       result = -ENODEV;
+                       dev_err(wusbhc->dev, "Cannot get associated UWB Host Controller\n");
+                       goto error_rc_get;
+               }
+
+               result = wusbhc_pal_register(wusbhc);
+               if (result < 0) {
+                       dev_err(wusbhc->dev, "Cannot register as a UWB PAL\n");
+                       goto error_pal_register;
+               }
+       }
        mutex_unlock(&wusbhc->mutex);
 
        if (chid)
                result = uwb_radio_start(&wusbhc->pal);
        else
                uwb_radio_stop(&wusbhc->pal);
+
+       return result;
+
+error_pal_register:
+       uwb_rc_put(wusbhc->uwb_rc);
+       wusbhc->uwb_rc = NULL;
+error_rc_get:
+       mutex_unlock(&wusbhc->mutex);
+
        return result;
 }
 EXPORT_SYMBOL_GPL(wusbhc_chid_set);
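
The new registration path follows the usual kernel goto-unwind style: each failure jumps to a label that releases only what was already acquired, in reverse order (here, dropping the radio-controller reference before unlocking). A hedged, generic sketch of the idiom with purely illustrative resources:

    #include <linux/slab.h>
    #include <linux/errno.h>

    static int demo_setup(void **out_a, void **out_b)
    {
            void *a, *b;
            int ret = -ENOMEM;

            a = kzalloc(64, GFP_KERNEL);
            if (!a)
                    goto err_a;
            b = kzalloc(64, GFP_KERNEL);
            if (!b)
                    goto err_b;
            *out_a = a;
            *out_b = b;
            return 0;

    err_b:
            kfree(a);       /* undo only what succeeded, in reverse order */
    err_a:
            return ret;
    }
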
index d0b172c5ecc77219f885bda14fabd2991b6688d1..59e100c2eb50d899d33cc0441a9a968d13354aae 100644 (file)
@@ -45,10 +45,11 @@ int wusbhc_pal_register(struct wusbhc *wusbhc)
 }
 
 /**
- * wusbhc_pal_register - unregister the WUSB HC as a UWB PAL
+ * wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL
  * @wusbhc: the WUSB HC
  */
 void wusbhc_pal_unregister(struct wusbhc *wusbhc)
 {
-       uwb_pal_unregister(&wusbhc->pal);
+       if (wusbhc->uwb_rc)
+               uwb_pal_unregister(&wusbhc->pal);
 }
index 6f4fafdc240117d0fd2879001e0c63db8981901a..ead79f7939275bb7c1c46f50bc568c3c2a6d8f33 100644 (file)
@@ -80,6 +80,9 @@ int wusbhc_rsv_establish(struct wusbhc *wusbhc)
        struct uwb_dev_addr bcid;
        int ret;
 
+       if (rc == NULL)
+               return -ENODEV;
+
        rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc);
        if (rsv == NULL)
                return -ENOMEM;
index 59ff254dfb6fc294108824da98cf36b2883f753f..bdb0cc3046b5f598d5e6540155bcfea4982bc0f1 100644 (file)
@@ -393,26 +393,6 @@ int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue,
 }
 EXPORT_SYMBOL_GPL(wusbhc_rh_control);
 
-int wusbhc_rh_suspend(struct usb_hcd *usb_hcd)
-{
-       struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-       dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
-               usb_hcd, wusbhc);
-       /* dump_stack(); */
-       return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(wusbhc_rh_suspend);
-
-int wusbhc_rh_resume(struct usb_hcd *usb_hcd)
-{
-       struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-       dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
-               usb_hcd, wusbhc);
-       /* dump_stack(); */
-       return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(wusbhc_rh_resume);
-
 int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx)
 {
        struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
index f67f7f1e6df9502680868d2bfa5e0493df0cf36d..ada4e08706238780d4695008284a78988e628301 100644 (file)
@@ -134,9 +134,10 @@ static void wa_notif_dispatch(struct work_struct *ws)
                case WA_NOTIF_TRANSFER:
                        wa_handle_notif_xfer(wa, notif_hdr);
                        break;
+               case HWA_NOTIF_BPST_ADJ:
+                       break; /* no action needed for BPST ADJ. */
                case DWA_NOTIF_RWAKE:
                case DWA_NOTIF_PORTSTATUS:
-               case HWA_NOTIF_BPST_ADJ:
                        /* FIXME: unimplemented WA NOTIFs */
                        /* fallthru */
                default:
index f0d546c5a089d3513f037b2eb4c63c841fff8cb5..9a595c1ed867ddbe29fb1d4aa9995b025262eff7 100644 (file)
@@ -251,8 +251,8 @@ static int __rpipe_reset(struct wahc *wa, unsigned index)
 static struct usb_wireless_ep_comp_descriptor epc0 = {
        .bLength = sizeof(epc0),
        .bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
-/*     .bMaxBurst = 1, */
-       .bMaxSequence = 31,
+       .bMaxBurst = 1,
+       .bMaxSequence = 2,
 };
 
 /*
@@ -317,6 +317,7 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
        struct device *dev = &wa->usb_iface->dev;
        struct usb_device *usb_dev = urb->dev;
        struct usb_wireless_ep_comp_descriptor *epcd;
+       u32 ack_window, epcd_max_sequence;
        u8 unauth;
 
        epcd = rpipe_epc_find(dev, ep);
@@ -333,8 +334,11 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
        rpipe->descr.wBlocks = cpu_to_le16(16);         /* given */
        /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
        rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
-       rpipe->descr.bHSHubAddress = 0;                 /* reserved: zero */
-       rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
+
+       rpipe->descr.hwa_bMaxBurst = max(min_t(unsigned int,
+                               epcd->bMaxBurst, 16U), 1U);
+       rpipe->descr.hwa_bDeviceInfoIndex =
+                       wusb_port_no_to_idx(urb->dev->portnum);
        /* FIXME: use maximum speed as supported or recommended by device */
        rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
                UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
@@ -344,26 +348,26 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
                le16_to_cpu(rpipe->descr.wRPipeIndex),
                usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
 
-       /* see security.c:wusb_update_address() */
-       if (unlikely(urb->dev->devnum == 0x80))
-               rpipe->descr.bDeviceAddress = 0;
-       else
-               rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
+       rpipe->descr.hwa_reserved = 0;
+
        rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
        /* FIXME: bDataSequence */
        rpipe->descr.bDataSequence = 0;
-       /* FIXME: dwCurrentWindow */
-       rpipe->descr.dwCurrentWindow = cpu_to_le32(1);
-       /* FIXME: bMaxDataSequence */
-       rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1;
+
+       /* start with base window of hwa_bMaxBurst bits starting at 0. */
+       ack_window = 0xFFFFFFFF >> (32 - rpipe->descr.hwa_bMaxBurst);
+       rpipe->descr.dwCurrentWindow = cpu_to_le32(ack_window);
+       epcd_max_sequence = max(min_t(unsigned int,
+                       epcd->bMaxSequence, 32U), 2U);
+       rpipe->descr.bMaxDataSequence = epcd_max_sequence - 1;
        rpipe->descr.bInterval = ep->desc.bInterval;
        /* FIXME: bOverTheAirInterval */
        rpipe->descr.bOverTheAirInterval = 0;   /* 0 if not isoc */
        /* FIXME: xmit power & preamble blah blah */
-       rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03;
+       rpipe->descr.bmAttribute = (ep->desc.bmAttributes &
+                                       USB_ENDPOINT_XFERTYPE_MASK);
        /* rpipe->descr.bmCharacteristics RO */
-       /* FIXME: bmRetryOptions */
-       rpipe->descr.bmRetryOptions = 15;
+       rpipe->descr.bmRetryOptions = (wa->wusb->retry_count & 0xF);
        /* FIXME: use for assessing link quality? */
        rpipe->descr.wNumTransactionErrors = 0;
        result = __rpipe_set_descr(wa, &rpipe->descr,
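
The hunk above clamps the wireless endpoint companion values before programming the rpipe: bMaxBurst is forced into the 1..16 range, bMaxSequence into 2..32, and the initial acknowledgement window is a bitmask with hwa_bMaxBurst consecutive low bits set. A standalone walk-through of that arithmetic (the descriptor values are made up):

#include <stdio.h>
#include <stdint.h>

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int bMaxBurst = 0;	/* bogus descriptor value: forced up to 1 */
	unsigned int bMaxSequence = 31;	/* typical companion descriptor value */

	unsigned int max_burst = clamp_uint(bMaxBurst, 1, 16);
	unsigned int max_seq   = clamp_uint(bMaxSequence, 2, 32);

	/* window of max_burst consecutive bits starting at sequence number 0 */
	uint32_t ack_window = 0xFFFFFFFFu >> (32 - max_burst);

	printf("hwa_bMaxBurst    = %u\n", max_burst);
	printf("dwCurrentWindow  = 0x%08x\n", ack_window);
	printf("bMaxDataSequence = %u\n", max_seq - 1);
	return 0;
}
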
@@ -387,10 +391,8 @@ static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
                           const struct usb_host_endpoint *ep,
                           const struct urb *urb, gfp_t gfp)
 {
-       int result = 0;         /* better code for lack of companion? */
+       int result = 0;
        struct device *dev = &wa->usb_iface->dev;
-       struct usb_device *usb_dev = urb->dev;
-       u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0;
        u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
 
 #define AIM_CHECK(rdf, val, text)                                      \
@@ -403,13 +405,10 @@ static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
                        WARN_ON(1);                                     \
                }                                                       \
        } while (0)
-       AIM_CHECK(wMaxPacketSize, cpu_to_le16(ep->desc.wMaxPacketSize),
-                 "(%u vs %u)");
-       AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)");
+       AIM_CHECK(hwa_bDeviceInfoIndex, portnum, "(%u vs %u)");
        AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
                        UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
                  "(%u vs %u)");
-       AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)");
        AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
        AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
        AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
index 6ef94bce8c0dc49a5e118c642dbcb4e6c2beddf3..16968c899493db24a40481e151b0da591405c1bf 100644 (file)
@@ -85,6 +85,7 @@
 #include <linux/hash.h>
 #include <linux/ratelimit.h>
 #include <linux/export.h>
+#include <linux/scatterlist.h>
 
 #include "wa-hc.h"
 #include "wusbhc.h"
@@ -442,8 +443,7 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
                goto error;
        }
        xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
-       xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
-               / xfer->seg_size;
+       xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
        if (xfer->segs >= WA_SEGS_MAX) {
                dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
                        (int)(urb->transfer_buffer_length / xfer->seg_size),
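
DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), so the new line computes the same segment count as the open-coded expression it replaces, just more readably. A quick standalone check (seg_size is illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int seg_size = 3584;	/* illustrative segment size */
	unsigned int lengths[] = { 0, 1, 3584, 3585, 10000 };
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
		printf("len=%5u -> segs=%u\n", lengths[i],
		       DIV_ROUND_UP(lengths[i], seg_size));
	return 0;
}
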
@@ -627,6 +627,86 @@ static void wa_seg_cb(struct urb *urb)
        }
 }
 
+/* allocate an SG list to store bytes_to_transfer bytes and copy the
+ * subset of the in_sg that matches the buffer subset
+ * we are about to transfer. */
+static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
+       const unsigned int bytes_transferred,
+       const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
+{
+       struct scatterlist *out_sg;
+       unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
+               nents;
+       struct scatterlist *current_xfer_sg = in_sg;
+       struct scatterlist *current_seg_sg, *last_seg_sg;
+
+       /* skip previously transferred pages. */
+       while ((current_xfer_sg) &&
+                       (bytes_processed < bytes_transferred)) {
+               bytes_processed += current_xfer_sg->length;
+
+               /* advance the sg if current segment starts on or past the
+                       next page. */
+               if (bytes_processed <= bytes_transferred)
+                       current_xfer_sg = sg_next(current_xfer_sg);
+       }
+
+       /* the data for the current segment starts in current_xfer_sg.
+               calculate the offset. */
+       if (bytes_processed > bytes_transferred) {
+               offset_into_current_page_data = current_xfer_sg->length -
+                       (bytes_processed - bytes_transferred);
+       }
+
+       /* calculate the number of pages needed by this segment. */
+       nents = DIV_ROUND_UP((bytes_to_transfer +
+               offset_into_current_page_data +
+               current_xfer_sg->offset),
+               PAGE_SIZE);
+
+       out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
+       if (out_sg) {
+               sg_init_table(out_sg, nents);
+
+               /* copy the portion of the incoming SG that correlates to the
+                * data to be transferred by this segment to the segment SG. */
+               last_seg_sg = current_seg_sg = out_sg;
+               bytes_processed = 0;
+
+               /* reset nents and calculate the actual number of sg entries
+                       needed. */
+               nents = 0;
+               while ((bytes_processed < bytes_to_transfer) &&
+                               current_seg_sg && current_xfer_sg) {
+                       unsigned int page_len = min((current_xfer_sg->length -
+                               offset_into_current_page_data),
+                               (bytes_to_transfer - bytes_processed));
+
+                       sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
+                               page_len,
+                               current_xfer_sg->offset +
+                               offset_into_current_page_data);
+
+                       bytes_processed += page_len;
+
+                       last_seg_sg = current_seg_sg;
+                       current_seg_sg = sg_next(current_seg_sg);
+                       current_xfer_sg = sg_next(current_xfer_sg);
+
+                       /* only the first page may require additional offset. */
+                       offset_into_current_page_data = 0;
+                       nents++;
+               }
+
+               /* update num_sgs and terminate the list since we may have
+                *  concatenated pages. */
+               sg_mark_end(last_seg_sg);
+               *out_num_sgs = nents;
+       }
+
+       return out_sg;
+}
+
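
wa_xfer_create_subset_sg() above works in two passes: it first skips scatterlist entries that earlier segments already consumed and computes how far into the current entry this segment begins, then copies just enough entries to cover bytes_to_transfer, terminating the new list with sg_mark_end(). The skip-and-offset arithmetic can be reproduced in isolation; the layout below is invented for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* illustrative SG layout: length and in-page offset of each entry */
	unsigned int sg_len[]    = { 4096, 4096, 4096 };
	unsigned int sg_offset[] = {  512,    0,    0 };
	unsigned int nr = 3;

	unsigned int bytes_transferred = 6000;	/* consumed by earlier segments */
	unsigned int bytes_to_transfer = 3584;	/* this segment's payload */

	unsigned int i = 0, done = 0, start_off = 0;

	/* skip entries wholly consumed by earlier segments */
	while (i < nr && done < bytes_transferred) {
		done += sg_len[i];
		if (done <= bytes_transferred)
			i++;
	}
	/* partial entry: how far into sg_len[i] this segment begins */
	if (done > bytes_transferred)
		start_off = sg_len[i] - (done - bytes_transferred);

	printf("segment starts in entry %u at byte %u\n", i, start_off);
	printf("worst-case entries needed: %u\n",
	       DIV_ROUND_UP(bytes_to_transfer + start_off + sg_offset[i],
			    PAGE_SIZE));
	return 0;
}
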
 /*
  * Allocate the segs array and initialize each of them
  *
@@ -663,9 +743,9 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
                                                  dto_epd->bEndpointAddress),
                                  &seg->xfer_hdr, xfer_hdr_size,
                                  wa_seg_cb, seg);
-               buf_itr_size = buf_size > xfer->seg_size ?
-                       xfer->seg_size : buf_size;
+               buf_itr_size = min(buf_size, xfer->seg_size);
                if (xfer->is_inbound == 0 && buf_size > 0) {
+                       /* outbound data. */
                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
                        if (seg->dto_urb == NULL)
                                goto error_dto_alloc;
@@ -679,9 +759,42 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
                                        xfer->urb->transfer_dma + buf_itr;
                                seg->dto_urb->transfer_flags |=
                                        URB_NO_TRANSFER_DMA_MAP;
-                       } else
-                               seg->dto_urb->transfer_buffer =
-                                       xfer->urb->transfer_buffer + buf_itr;
+                               seg->dto_urb->transfer_buffer = NULL;
+                               seg->dto_urb->sg = NULL;
+                               seg->dto_urb->num_sgs = 0;
+                       } else {
+                               /* do buffer or SG processing. */
+                               seg->dto_urb->transfer_flags &=
+                                       ~URB_NO_TRANSFER_DMA_MAP;
+                               /* this should always be 0 before a resubmit. */
+                               seg->dto_urb->num_mapped_sgs = 0;
+
+                               if (xfer->urb->transfer_buffer) {
+                                       seg->dto_urb->transfer_buffer =
+                                               xfer->urb->transfer_buffer +
+                                               buf_itr;
+                                       seg->dto_urb->sg = NULL;
+                                       seg->dto_urb->num_sgs = 0;
+                               } else {
+                                       /* allocate an SG list to store seg_size
+                                           bytes and copy the subset of the
+                                           xfer->urb->sg that matches the
+                                           buffer subset we are about to read.
+                                       */
+                                       seg->dto_urb->sg =
+                                               wa_xfer_create_subset_sg(
+                                               xfer->urb->sg,
+                                               buf_itr, buf_itr_size,
+                                               &(seg->dto_urb->num_sgs));
+
+                                       if (!(seg->dto_urb->sg)) {
+                                               seg->dto_urb->num_sgs   = 0;
+                                               goto error_sg_alloc;
+                                       }
+
+                                       seg->dto_urb->transfer_buffer = NULL;
+                               }
+                       }
                        seg->dto_urb->transfer_buffer_length = buf_itr_size;
                }
                seg->status = WA_SEG_READY;
@@ -690,6 +803,8 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
        }
        return 0;
 
+error_sg_alloc:
+       kfree(seg->dto_urb);
 error_dto_alloc:
        kfree(xfer->seg[cnt]);
        cnt--;
@@ -1026,7 +1141,8 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
        unsigned long my_flags;
        unsigned cant_sleep = irqs_disabled() | in_atomic();
 
-       if (urb->transfer_buffer == NULL
+       if ((urb->transfer_buffer == NULL)
+           && (urb->sg == NULL)
            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
            && urb->transfer_buffer_length != 0) {
                dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
@@ -1261,7 +1377,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
        seg = xfer->seg[seg_idx];
        rpipe = xfer->ep->hcpriv;
        usb_status = xfer_result->bTransferStatus;
-       dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
+       dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
                xfer, seg_idx, usb_status, seg->status);
        if (seg->status == WA_SEG_ABORTED
            || seg->status == WA_SEG_ERROR)     /* already handled */
@@ -1276,8 +1392,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
        }
        if (usb_status & 0x80) {
                seg->result = wa_xfer_status_to_errno(usb_status);
-               dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
-                       xfer, seg->index, usb_status);
+               dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
+                       xfer, xfer->id, seg->index, usb_status);
                goto error_complete;
        }
        /* FIXME: we ignore warnings, tally them for stats */
@@ -1286,18 +1402,47 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
        if (xfer->is_inbound) { /* IN data phase: read to buffer */
                seg->status = WA_SEG_DTI_PENDING;
                BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+               /* this should always be 0 before a resubmit. */
+               wa->buf_in_urb->num_mapped_sgs  = 0;
+
                if (xfer->is_dma) {
                        wa->buf_in_urb->transfer_dma =
                                xfer->urb->transfer_dma
-                               + seg_idx * xfer->seg_size;
+                               + (seg_idx * xfer->seg_size);
                        wa->buf_in_urb->transfer_flags
                                |= URB_NO_TRANSFER_DMA_MAP;
+                       wa->buf_in_urb->transfer_buffer = NULL;
+                       wa->buf_in_urb->sg = NULL;
+                       wa->buf_in_urb->num_sgs = 0;
                } else {
-                       wa->buf_in_urb->transfer_buffer =
-                               xfer->urb->transfer_buffer
-                               + seg_idx * xfer->seg_size;
+                       /* do buffer or SG processing. */
                        wa->buf_in_urb->transfer_flags
                                &= ~URB_NO_TRANSFER_DMA_MAP;
+
+                       if (xfer->urb->transfer_buffer) {
+                               wa->buf_in_urb->transfer_buffer =
+                                       xfer->urb->transfer_buffer
+                                       + (seg_idx * xfer->seg_size);
+                               wa->buf_in_urb->sg = NULL;
+                               wa->buf_in_urb->num_sgs = 0;
+                       } else {
+                               /* allocate an SG list to store seg_size bytes
+                                       and copy the subset of the xfer->urb->sg
+                                       that matches the buffer subset we are
+                                       about to read. */
+                               wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
+                                       xfer->urb->sg,
+                                       seg_idx * xfer->seg_size,
+                                       le32_to_cpu(
+                                               xfer_result->dwTransferLength),
+                                       &(wa->buf_in_urb->num_sgs));
+
+                               if (!(wa->buf_in_urb->sg)) {
+                                       wa->buf_in_urb->num_sgs = 0;
+                                       goto error_sg_alloc;
+                               }
+                               wa->buf_in_urb->transfer_buffer = NULL;
+                       }
                }
                wa->buf_in_urb->transfer_buffer_length =
                        le32_to_cpu(xfer_result->dwTransferLength);
@@ -1330,6 +1475,8 @@ error_submit_buf_in:
                dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
                        xfer, seg_idx, result);
        seg->result = result;
+       kfree(wa->buf_in_urb->sg);
+error_sg_alloc:
 error_complete:
        seg->status = WA_SEG_ERROR;
        xfer->segs_done++;
@@ -1381,6 +1528,10 @@ static void wa_buf_in_cb(struct urb *urb)
        unsigned long flags;
        u8 done = 0;
 
+       /* free the sg if it was used. */
+       kfree(urb->sg);
+       urb->sg = NULL;
+
        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
index 0faca16df7656c38f14ba8af22ce9acda5735030..742c607d1fa3e3f92ce0481f5e90b1a6c1935db7 100644 (file)
@@ -75,12 +75,11 @@ static ssize_t wusb_trust_timeout_store(struct device *dev,
                result = -EINVAL;
                goto out;
        }
-       /* FIXME: maybe we should check for range validity? */
-       wusbhc->trust_timeout = trust_timeout;
+       wusbhc->trust_timeout = min_t(unsigned, trust_timeout, 500);
        cancel_delayed_work(&wusbhc->keep_alive_timer);
        flush_workqueue(wusbd);
        queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
-                          (trust_timeout * CONFIG_HZ)/1000/2);
+                          msecs_to_jiffies(wusbhc->trust_timeout / 2));
 out:
        return result < 0 ? result : size;
 }
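
The store handler above now clamps the trust timeout to 500 ms and schedules the keep-alive work with msecs_to_jiffies() rather than open-coded CONFIG_HZ arithmetic, which is clearer and handles rounding consistently. The equivalent calculation, sketched in isolation with an assumed tick rate:

#include <stdio.h>

#define HZ 250u			/* assumed tick rate, for illustration only */

/* simplified sketch of a ms-to-jiffies conversion that rounds up;
 * not the real kernel helper */
static unsigned int msecs_to_jiffies_approx(unsigned int ms)
{
	return (ms * HZ + 999) / 1000;
}

int main(void)
{
	unsigned int requested = 2000;	/* sysfs input, in ms */
	unsigned int trust_timeout = requested < 500 ? requested : 500;

	printf("clamped timeout : %u ms\n", trust_timeout);
	printf("keep-alive delay: %u jiffies\n",
	       msecs_to_jiffies_approx(trust_timeout / 2));
	return 0;
}
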
@@ -176,11 +175,72 @@ static ssize_t wusb_phy_rate_store(struct device *dev,
 }
 static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, wusb_phy_rate_store);
 
+static ssize_t wusb_dnts_show(struct device *dev,
+                                 struct device_attribute *attr,
+                                 char *buf)
+{
+       struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+       return sprintf(buf, "num slots: %d\ninterval: %dms\n",
+                       wusbhc->dnts_num_slots, wusbhc->dnts_interval);
+}
+
+static ssize_t wusb_dnts_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t size)
+{
+       struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+       uint8_t num_slots, interval;
+       ssize_t result;
+
+       result = sscanf(buf, "%hhu %hhu", &num_slots, &interval);
+
+       if (result != 2)
+               return -EINVAL;
+
+       wusbhc->dnts_num_slots = num_slots;
+       wusbhc->dnts_interval = interval;
+
+       return size;
+}
+static DEVICE_ATTR(wusb_dnts, 0644, wusb_dnts_show, wusb_dnts_store);
+
+static ssize_t wusb_retry_count_show(struct device *dev,
+                                 struct device_attribute *attr,
+                                 char *buf)
+{
+       struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+       return sprintf(buf, "%d\n", wusbhc->retry_count);
+}
+
+static ssize_t wusb_retry_count_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t size)
+{
+       struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+       uint8_t retry_count;
+       ssize_t result;
+
+       result = sscanf(buf, "%hhu", &retry_count);
+
+       if (result != 1)
+               return -EINVAL;
+
+       wusbhc->retry_count = max_t(uint8_t, retry_count, WUSB_RETRY_COUNT_MAX);
+
+       return size;
+}
+static DEVICE_ATTR(wusb_retry_count, 0644, wusb_retry_count_show,
+       wusb_retry_count_store);
+
 /* Group all the WUSBHC attributes */
 static struct attribute *wusbhc_attrs[] = {
                &dev_attr_wusb_trust_timeout.attr,
                &dev_attr_wusb_chid.attr,
                &dev_attr_wusb_phy_rate.attr,
+               &dev_attr_wusb_dnts.attr,
+               &dev_attr_wusb_retry_count.attr,
                NULL,
 };
 
@@ -206,8 +266,12 @@ int wusbhc_create(struct wusbhc *wusbhc)
 {
        int result = 0;
 
+       /* set defaults.  These can be overwritten using sysfs attributes. */
        wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
        wusbhc->phy_rate = UWB_PHY_RATE_INVALID - 1;
+       wusbhc->dnts_num_slots = 4;
+       wusbhc->dnts_interval = 2;
+       wusbhc->retry_count = WUSB_RETRY_COUNT_INFINITE;
 
        mutex_init(&wusbhc->mutex);
        result = wusbhc_mmcie_create(wusbhc);
@@ -261,13 +325,7 @@ int wusbhc_b_create(struct wusbhc *wusbhc)
                goto error_create_attr_group;
        }
 
-       result = wusbhc_pal_register(wusbhc);
-       if (result < 0)
-               goto error_pal_register;
        return 0;
-
-error_pal_register:
-       sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
 error_create_attr_group:
        return result;
 }
@@ -393,7 +451,8 @@ EXPORT_SYMBOL_GPL(wusbhc_giveback_urb);
  */
 void wusbhc_reset_all(struct wusbhc *wusbhc)
 {
-       uwb_rc_reset_all(wusbhc->uwb_rc);
+       if (wusbhc->uwb_rc)
+               uwb_rc_reset_all(wusbhc->uwb_rc);
 }
 EXPORT_SYMBOL_GPL(wusbhc_reset_all);
 
index 3a2d09162e70f2d6480bf0f7f1ea3b19e48c07b2..711b1952b114ab09322a2b11289cf7aa5fe0d433 100644 (file)
@@ -69,6 +69,8 @@
  * zone 0.
  */
 #define WUSB_CHANNEL_STOP_DELAY_MS 8
+#define WUSB_RETRY_COUNT_MAX 15
+#define WUSB_RETRY_COUNT_INFINITE 0
 
 /**
  * Wireless USB device
@@ -252,6 +254,9 @@ struct wusbhc {
        unsigned trust_timeout;                 /* in jiffies */
        struct wusb_ckhdid chid;
        uint8_t phy_rate;
+       uint8_t dnts_num_slots;
+       uint8_t dnts_interval;
+       uint8_t retry_count;
        struct wuie_host_info *wuie_host_info;
 
        struct mutex mutex;                     /* locks everything else */
@@ -399,8 +404,6 @@ extern void wusbhc_rh_destroy(struct wusbhc *);
 
 extern int wusbhc_rh_status_data(struct usb_hcd *, char *);
 extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16);
-extern int wusbhc_rh_suspend(struct usb_hcd *);
-extern int wusbhc_rh_resume(struct usb_hcd *);
 extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned);
 
 /* MMC handling */
index 3fbcf789dfaad71ec81f5f3b9e8776ec18abaa26..16ada8341c46dedb907df45392e669d42f967dec 100644 (file)
@@ -67,14 +67,14 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
        } else
                dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
 
-       spin_lock_bh(&rc->rsvs_lock);
+       spin_lock_irq(&rc->rsvs_lock);
        if (rc->set_drp_ie_pending > 1) {
                rc->set_drp_ie_pending = 0;
                uwb_rsv_queue_update(rc);       
        } else {
                rc->set_drp_ie_pending = 0;     
        }
-       spin_unlock_bh(&rc->rsvs_lock);
+       spin_unlock_irq(&rc->rsvs_lock);
 }
 
 /**
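
This hunk (and the matching one in uwb_rsv_sched_update() further down) moves rsvs_lock from the _bh to the _irq spinlock variants. spin_lock_bh() only masks softirqs, so it does not protect against the same lock being taken from hard interrupt context; spin_lock_irq() disables local interrupts as well. A minimal sketch of the pattern, assuming a lock that a hardirq handler also takes:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(example_lock);

static irqreturn_t example_irq(int irq, void *data)
{
	spin_lock(&example_lock);	/* already running in hardirq context */
	/* ... update shared state ... */
	spin_unlock(&example_lock);
	return IRQ_HANDLED;
}

static void example_update(void)
{
	spin_lock_irq(&example_lock);	/* not _bh: hardirqs must be off too */
	/* ... update shared state ... */
	spin_unlock_irq(&example_lock);
}
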
index 86ed7e61e597404e11b6cbc084e63e2ae48b93d7..457f31d99bf42d4cbd9240f6a6b450a298fdd1e2 100644 (file)
@@ -436,7 +436,6 @@ ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
        unsigned long flags;
        unsigned itr;
        u16 type_event_high, event;
-       u8 *ptr = (u8 *) rceb;
 
        read_lock_irqsave(&uwb_est_lock, flags);
        size = -ENOSPC;
@@ -453,12 +452,12 @@ ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
                if (size != -ENOENT)
                        goto out;
        }
-       dev_dbg(dev, "event 0x%02x/%04x/%02x: no handlers available; "
-               "RCEB %02x %02x %02x %02x\n",
+       dev_dbg(dev,
+               "event 0x%02x/%04x/%02x: no handlers available; RCEB %4ph\n",
                (unsigned) rceb->bEventType,
                (unsigned) le16_to_cpu(rceb->wEvent),
                (unsigned) rceb->bEventContext,
-               ptr[0], ptr[1], ptr[2], ptr[3]);
+               rceb);
        size = -ENOENT;
 out:
        read_unlock_irqrestore(&uwb_est_lock, flags);
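
%*ph is the printk extension for hex-dumping a small buffer: the field width is the byte count and the argument is a pointer, so "%4ph" prints the first four bytes of the RCEB and replaces the four separate ptr[n] arguments of the old format string. A minimal sketch of the usage (values are illustrative):

#include <linux/device.h>

static void example_dump(struct device *dev)
{
	u8 hdr[4] = { 0x01, 0x00, 0x2e, 0x03 };

	/* field width = number of bytes, argument = pointer */
	dev_dbg(dev, "RCEB %4ph\n", hdr);	/* -> "RCEB 01 00 2e 03" */
}
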
index 810c90ae2c5584fc57113813f2a9251422724840..0621abef9b4a0e2ecb1291eda1650f7529a64d8c 100644 (file)
@@ -900,6 +900,12 @@ static const struct usb_device_id hwarc_id_table[] = {
        /* Intel i1480 (using firmware 1.3PA2-20070828) */
        { USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02),
          .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
+       /* Alereon 5310 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x01, 0x02),
+         .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
+       /* Alereon 5611 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x01, 0x02),
+         .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
        /* Generic match for the Radio Control interface */
        { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), },
        { },
index 8ee7d90a8c68ac04b97f82a99f1097a114087e01..690577d2a35bf3494f3e9886c7f9ca4fd21fb17c 100644 (file)
@@ -44,10 +44,12 @@ int uwb_pal_register(struct uwb_pal *pal)
        int ret;
 
        if (pal->device) {
+               /* create a link to the uwb_rc in the PAL device's directory. */
                ret = sysfs_create_link(&pal->device->kobj,
                                        &rc->uwb_dev.dev.kobj, "uwb_rc");
                if (ret < 0)
                        return ret;
+               /* create a link to the PAL in the UWB device's directory. */
                ret = sysfs_create_link(&rc->uwb_dev.dev.kobj,
                                        &pal->device->kobj, pal->name);
                if (ret < 0) {
index f4ae05f78c42bc5c20753af530cac27331d92ba7..738e8a8cb811ddd86097e2a1f180cec96057e6ea 100644 (file)
@@ -872,7 +872,7 @@ void uwb_rsv_queue_update(struct uwb_rc *rc)
  */
 void uwb_rsv_sched_update(struct uwb_rc *rc)
 {
-       spin_lock_bh(&rc->rsvs_lock);
+       spin_lock_irq(&rc->rsvs_lock);
        if (!delayed_work_pending(&rc->rsv_update_work)) {
                if (rc->set_drp_ie_pending > 0) {
                        rc->set_drp_ie_pending++;
@@ -881,7 +881,7 @@ void uwb_rsv_sched_update(struct uwb_rc *rc)
                uwb_rsv_queue_update(rc);
        }
 unlock:
-       spin_unlock_bh(&rc->rsvs_lock);
+       spin_unlock_irq(&rc->rsvs_lock);
 }
 
 /*
index a7494bf100815279be8420f435786e6d513651fc..9a103b100f1e61a3538e9184d6e9b2a141f2f45d 100644 (file)
@@ -55,7 +55,8 @@ static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc)
 
 static inline void __uwb_rc_put(struct uwb_rc *rc)
 {
-       uwb_dev_put(&rc->uwb_dev);
+       if (rc)
+               uwb_dev_put(&rc->uwb_dev);
 }
 
 extern int uwb_rc_reset(struct uwb_rc *rc);
index f48093e649e4203ecb9408406b44e42f936e9ff2..c9df8ba97daec8f644bc0ff9cc77afe513510903 100644 (file)
@@ -253,19 +253,7 @@ static struct pci_driver whci_driver = {
        .remove   = whci_remove,
 };
 
-static int __init whci_init(void)
-{
-       return pci_register_driver(&whci_driver);
-}
-
-static void __exit whci_exit(void)
-{
-       pci_unregister_driver(&whci_driver);
-}
-
-module_init(whci_init);
-module_exit(whci_exit);
-
+module_pci_driver(whci_driver);
 MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator");
 MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
 MODULE_LICENSE("GPL");
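
module_pci_driver() generates exactly the boilerplate removed above: a module_init() that calls pci_register_driver() and a module_exit() that calls pci_unregister_driver(), for drivers whose init and exit paths do nothing else. A hedged skeleton (device IDs and callbacks are placeholders):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* illustrative IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pcim_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct pci_driver example_driver = {
	.name     = "example",
	.id_table = example_ids,
	.probe    = example_probe,
	.remove   = example_remove,
};

/* expands to module_init/module_exit calling pci_(un)register_driver() */
module_pci_driver(example_driver);

MODULE_LICENSE("GPL");
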
index ac3725440d648ab4f3efefda246be390b6be643e..c5179e269df64ade2de20a384b6732a6922c9bcf 100644 (file)
@@ -499,7 +499,6 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
        }
 
        vma->vm_private_data = vdev;
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
 
index 700cac067b4611891af50d90ee083bdbb0f94eb9..ebeb9715f0616fd87071f4a1e6f858c65a7662f1 100644 (file)
@@ -385,8 +385,6 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
 
-       vma->vm_flags |= VM_IO;
-
        if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
                                vma->vm_end - vma->vm_start,
                                vma->vm_page_prot)) {
index 1b59054fc6a4ccb5b2c2c12ca9ceb6070ed53bd2..301224ecc9507186e78e9cc14576e7a5316b227d 100644 (file)
@@ -1258,13 +1258,9 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
 
-       vma->vm_flags |= VM_IO;
-
        return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
-
-       return 0;
 }
 
 static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
index a92783e480e66b60755dc0dcc3052d2d75d17fe1..d55b33757465bf971dad6af8e4698518b717b944 100644 (file)
@@ -556,34 +556,6 @@ static int do_fbcon_takeover(int show_logo)
        return err;
 }
 
-static int fbcon_takeover(int show_logo)
-{
-       int err, i;
-
-       if (!num_registered_fb)
-               return -ENODEV;
-
-       if (!show_logo)
-               logo_shown = FBCON_LOGO_DONTSHOW;
-
-       for (i = first_fb_vc; i <= last_fb_vc; i++)
-               con2fb_map[i] = info_idx;
-
-       err = take_over_console(&fb_con, first_fb_vc, last_fb_vc,
-                               fbcon_is_default);
-
-       if (err) {
-               for (i = first_fb_vc; i <= last_fb_vc; i++) {
-                       con2fb_map[i] = -1;
-               }
-               info_idx = -1;
-       } else {
-               fbcon_has_console_bind = 1;
-       }
-
-       return err;
-}
-
 #ifdef MODULE
 static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
                               int cols, int rows, int new_cols, int new_rows)
@@ -901,7 +873,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
 /*
  *  Low Level Operations
  */
-/* NOTE: fbcon cannot be __init: it may be called from take_over_console later */
+/* NOTE: fbcon cannot be __init: it may be called from do_take_over_console later */
 static int var_to_display(struct display *disp,
                          struct fb_var_screeninfo *var,
                          struct fb_info *info)
@@ -3543,8 +3515,9 @@ static void fbcon_start(void)
                        }
                }
 
+               do_fbcon_takeover(0);
                console_unlock();
-               fbcon_takeover(0);
+
        }
 }
 
@@ -3648,8 +3621,8 @@ static void __exit fb_console_exit(void)
        fbcon_deinit_device();
        device_destroy(fb_class, MKDEV(0, 0));
        fbcon_exit();
+       do_unregister_con_driver(&fb_con);
        console_unlock();
-       unregister_con_driver(&fb_con);
 }      
 
 module_exit(fb_console_exit);
index 0b67866cae10334200e1570d421cc5f74d56c7ea..296e945615560b85cf2f7d50990776f46eeb119a 100644 (file)
@@ -585,10 +585,14 @@ static const struct consw mda_con = {
 
 int __init mda_console_init(void)
 {
+       int err;
+
        if (mda_first_vc > mda_last_vc)
                return 1;
-
-       return take_over_console(&mda_con, mda_first_vc-1, mda_last_vc-1, 0);
+       console_lock();
+       err = do_take_over_console(&mda_con, mda_first_vc-1, mda_last_vc-1, 0);
+       console_unlock();
+       return err;
 }
 
 static void __exit mda_console_exit(void)
index b05afd03729eb7c1155aac758ea538948d81fb9f..a6ab9299813c9558dcf9e2cb2ec629e3b557e398 100644 (file)
@@ -297,7 +297,7 @@ static void newport_exit(void)
                newport_set_def_font(i, NULL);
 }
 
-/* Can't be __init, take_over_console may call it later */
+/* Can't be __init, do_take_over_console may call it later */
 static const char *newport_startup(void)
 {
        int i;
@@ -746,6 +746,7 @@ static int newport_probe(struct gio_device *dev,
                         const struct gio_device_id *id)
 {
        unsigned long newport_addr;
+       int err;
 
        if (!dev->resource.start)
                return -EINVAL;
@@ -759,8 +760,10 @@ static int newport_probe(struct gio_device *dev,
 
        npregs = (struct newport_regs *)/* ioremap cannot fail */
                ioremap(newport_addr, sizeof(struct newport_regs));
-
-       return take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1);
+       console_lock();
+       err = do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1);
+       console_unlock();
+       return err;
 }
 
 static void newport_remove(struct gio_device *dev)
index 491c1c1baf4ccd652eadf85079544686064e3228..5f65ca3d85645edabc1b4f051173ebdb1d4c3d35 100644 (file)
@@ -372,6 +372,7 @@ static const struct consw sti_con = {
 
 static int __init sticonsole_init(void)
 {
+    int err;
     /* already initialized ? */
     if (sticon_sti)
         return 0;
@@ -382,7 +383,10 @@ static int __init sticonsole_init(void)
 
     if (conswitchp == &dummy_con) {
        printk(KERN_INFO "sticon: Initializing STI text console.\n");
-       return take_over_console(&sti_con, 0, MAX_NR_CONSOLES - 1, 1);
+       console_lock();
+       err = do_take_over_console(&sti_con, 0, MAX_NR_CONSOLES - 1, 1);
+       console_unlock();
+       return err;
     }
     return 0;
 }
index 97563c55af63e46d8a6a00cf6f036b3df6e8e203..7cf0b13d061b624192c35092073963b641b41c52 100644 (file)
@@ -494,7 +494,6 @@ pxa3xx_gcu_misc_mmap(struct file *file, struct vm_area_struct *vma)
                if (size != resource_size(priv->resource_mem))
                        return -EINVAL;
 
-               vma->vm_flags |= VM_IO;
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
                return io_remap_pfn_range(vma, vma->vm_start,
index 055562c580b43e673615811f4bdc6e778180420b..9ff073f4090afee750e4058129427d4bd6f6117f 100644 (file)
@@ -148,13 +148,14 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
  * @offset: offset in the page
  */
 
-static void v9fs_invalidate_page(struct page *page, unsigned long offset)
+static void v9fs_invalidate_page(struct page *page, unsigned int offset,
+                                unsigned int length)
 {
        /*
         * If called with zero offset, we should release
         * the private state associated with the page
         */
-       if (offset == 0)
+       if (offset == 0 && length == PAGE_CACHE_SIZE)
                v9fs_fscache_invalidate_page(page);
 }
 
index be1e34adc3c602ca0e2eb49f3607c7076ddafb3c..4d0c2e0be7e5cf86f2d5fd0137cb9467d654023f 100644 (file)
@@ -101,16 +101,15 @@ static struct p9_rdir *v9fs_alloc_rdir_buf(struct file *filp, int buflen)
 }
 
 /**
- * v9fs_dir_readdir - read a directory
- * @filp: opened file structure
- * @dirent: directory structure ???
- * @filldir: function to populate directory structure ???
+ * v9fs_dir_readdir - iterate through a directory
+ * @file: opened file structure
+ * @ctx: actor we feed the entries to
  *
  */
 
-static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
 {
-       int over;
+       bool over;
        struct p9_wstat st;
        int err = 0;
        struct p9_fid *fid;
@@ -118,19 +117,19 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
        int reclen = 0;
        struct p9_rdir *rdir;
 
-       p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
-       fid = filp->private_data;
+       p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name);
+       fid = file->private_data;
 
        buflen = fid->clnt->msize - P9_IOHDRSZ;
 
-       rdir = v9fs_alloc_rdir_buf(filp, buflen);
+       rdir = v9fs_alloc_rdir_buf(file, buflen);
        if (!rdir)
                return -ENOMEM;
 
        while (1) {
                if (rdir->tail == rdir->head) {
-                       err = v9fs_file_readn(filp, rdir->buf, NULL,
-                                                       buflen, filp->f_pos);
+                       err = v9fs_file_readn(file, rdir->buf, NULL,
+                                                       buflen, ctx->pos);
                        if (err <= 0)
                                return err;
 
@@ -148,51 +147,45 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        }
                        reclen = st.size+2;
 
-                       over = filldir(dirent, st.name, strlen(st.name),
-                           filp->f_pos, v9fs_qid2ino(&st.qid), dt_type(&st));
-
+                       over = !dir_emit(ctx, st.name, strlen(st.name),
+                                        v9fs_qid2ino(&st.qid), dt_type(&st));
                        p9stat_free(&st);
-
                        if (over)
                                return 0;
 
                        rdir->head += reclen;
-                       filp->f_pos += reclen;
+                       ctx->pos += reclen;
                }
        }
 }
 
 /**
- * v9fs_dir_readdir_dotl - read a directory
- * @filp: opened file structure
- * @dirent: buffer to fill dirent structures
- * @filldir: function to populate dirent structures
+ * v9fs_dir_readdir_dotl - iterate through a directory
+ * @file: opened file structure
+ * @ctx: actor we feed the entries to
  *
  */
-static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
-                                               filldir_t filldir)
+static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx)
 {
-       int over;
        int err = 0;
        struct p9_fid *fid;
        int buflen;
        struct p9_rdir *rdir;
        struct p9_dirent curdirent;
-       u64 oldoffset = 0;
 
-       p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
-       fid = filp->private_data;
+       p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name);
+       fid = file->private_data;
 
        buflen = fid->clnt->msize - P9_READDIRHDRSZ;
 
-       rdir = v9fs_alloc_rdir_buf(filp, buflen);
+       rdir = v9fs_alloc_rdir_buf(file, buflen);
        if (!rdir)
                return -ENOMEM;
 
        while (1) {
                if (rdir->tail == rdir->head) {
                        err = p9_client_readdir(fid, rdir->buf, buflen,
-                                               filp->f_pos);
+                                               ctx->pos);
                        if (err <= 0)
                                return err;
 
@@ -210,22 +203,13 @@ static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
                                return -EIO;
                        }
 
-                       /* d_off in dirent structure tracks the offset into
-                        * the next dirent in the dir. However, filldir()
-                        * expects offset into the current dirent. Hence
-                        * while calling filldir send the offset from the
-                        * previous dirent structure.
-                        */
-                       over = filldir(dirent, curdirent.d_name,
-                                       strlen(curdirent.d_name),
-                                       oldoffset, v9fs_qid2ino(&curdirent.qid),
-                                       curdirent.d_type);
-                       oldoffset = curdirent.d_off;
-
-                       if (over)
+                       if (!dir_emit(ctx, curdirent.d_name,
+                                     strlen(curdirent.d_name),
+                                     v9fs_qid2ino(&curdirent.qid),
+                                     curdirent.d_type))
                                return 0;
 
-                       filp->f_pos = curdirent.d_off;
+                       ctx->pos = curdirent.d_off;
                        rdir->head += err;
                }
        }
@@ -254,7 +238,7 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
 const struct file_operations v9fs_dir_operations = {
        .read = generic_read_dir,
        .llseek = generic_file_llseek,
-       .readdir = v9fs_dir_readdir,
+       .iterate = v9fs_dir_readdir,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
 };
@@ -262,7 +246,7 @@ const struct file_operations v9fs_dir_operations = {
 const struct file_operations v9fs_dir_operations_dotl = {
        .read = generic_read_dir,
        .llseek = generic_file_llseek,
-       .readdir = v9fs_dir_readdir_dotl,
+       .iterate = v9fs_dir_readdir_dotl,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
         .fsync = v9fs_file_fsync_dotl,
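
The 9p changes above are part of the tree-wide switch from ->readdir/filldir to ->iterate/dir_emit: the filesystem no longer manipulates filp->f_pos or interprets filldir()'s return value, it simply advances ctx->pos and stops as soon as dir_emit() returns false. A minimal sketch of the new shape for a made-up filesystem:

#include <linux/fs.h>

static int example_iterate(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);

	/* emit "." and ".." if the caller is still before position 2 */
	if (!dir_emit_dots(file, ctx))
		return 0;

	/* one made-up entry at position 2; real code would walk the dir */
	if (ctx->pos == 2) {
		if (!dir_emit(ctx, "example", 7, inode->i_ino + 1, DT_REG))
			return 0;
		ctx->pos++;
	}
	return 0;
}

static const struct file_operations example_dir_fops = {
	.read    = generic_read_dir,
	.llseek  = generic_file_llseek,
	.iterate = example_iterate,
};
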
index 9cf874ce8336563b1d50d3276fab9832e977b781..ade28bb058e311dc3b2f120e2405884c4e090f7d 100644 (file)
 static DEFINE_RWLOCK(adfs_dir_lock);
 
 static int
-adfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+adfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
        struct object_info obj;
        struct adfs_dir dir;
        int ret = 0;
 
-       if (filp->f_pos >> 32)
-               goto out;
+       if (ctx->pos >> 32)
+               return 0;
 
        ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
        if (ret)
-               goto out;
+               return ret;
 
-       switch ((unsigned long)filp->f_pos) {
-       case 0:
-               if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+       if (ctx->pos == 0) {
+               if (!dir_emit_dot(file, ctx))
                        goto free_out;
-               filp->f_pos += 1;
-
-       case 1:
-               if (filldir(dirent, "..", 2, 1, dir.parent_id, DT_DIR) < 0)
+               ctx->pos = 1;
+       }
+       if (ctx->pos == 1) {
+               if (!dir_emit(ctx, "..", 2, dir.parent_id, DT_DIR))
                        goto free_out;
-               filp->f_pos += 1;
-
-       default:
-               break;
+               ctx->pos = 2;
        }
 
        read_lock(&adfs_dir_lock);
 
-       ret = ops->setpos(&dir, filp->f_pos - 2);
+       ret = ops->setpos(&dir, ctx->pos - 2);
        if (ret)
                goto unlock_out;
        while (ops->getnext(&dir, &obj) == 0) {
-               if (filldir(dirent, obj.name, obj.name_len,
-                           filp->f_pos, obj.file_id, DT_UNKNOWN) < 0)
-                       goto unlock_out;
-               filp->f_pos += 1;
+               if (!dir_emit(ctx, obj.name, obj.name_len,
+                           obj.file_id, DT_UNKNOWN))
+                       break;
+               ctx->pos++;
        }
 
 unlock_out:
@@ -65,8 +61,6 @@ unlock_out:
 
 free_out:
        ops->free(&dir);
-
-out:
        return ret;
 }
 
@@ -192,7 +186,7 @@ out:
 const struct file_operations adfs_dir_operations = {
        .read           = generic_read_dir,
        .llseek         = generic_file_llseek,
-       .readdir        = adfs_readdir,
+       .iterate        = adfs_readdir,
        .fsync          = generic_file_fsync,
 };
 
index fd11a6d608ee268c5813e028ecf4703a643f804f..f1eba8c3644e9800397f6d1b7cc3e6ceaf723bbd 100644 (file)
 
 #include "affs.h"
 
-static int affs_readdir(struct file *, void *, filldir_t);
+static int affs_readdir(struct file *, struct dir_context *);
 
 const struct file_operations affs_dir_operations = {
        .read           = generic_read_dir,
        .llseek         = generic_file_llseek,
-       .readdir        = affs_readdir,
+       .iterate        = affs_readdir,
        .fsync          = affs_file_fsync,
 };
 
@@ -40,52 +40,35 @@ const struct inode_operations affs_dir_inode_operations = {
 };
 
 static int
-affs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+affs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode            *inode = file_inode(filp);
+       struct inode            *inode = file_inode(file);
        struct super_block      *sb = inode->i_sb;
-       struct buffer_head      *dir_bh;
-       struct buffer_head      *fh_bh;
+       struct buffer_head      *dir_bh = NULL;
+       struct buffer_head      *fh_bh = NULL;
        unsigned char           *name;
        int                      namelen;
        u32                      i;
        int                      hash_pos;
        int                      chain_pos;
-       u32                      f_pos;
        u32                      ino;
-       int                      stored;
-       int                      res;
 
-       pr_debug("AFFS: readdir(ino=%lu,f_pos=%lx)\n",inode->i_ino,(unsigned long)filp->f_pos);
+       pr_debug("AFFS: readdir(ino=%lu,f_pos=%lx)\n",inode->i_ino,(unsigned long)ctx->pos);
 
-       stored = 0;
-       res    = -EIO;
-       dir_bh = NULL;
-       fh_bh  = NULL;
-       f_pos  = filp->f_pos;
-
-       if (f_pos == 0) {
-               filp->private_data = (void *)0;
-               if (filldir(dirent, ".", 1, f_pos, inode->i_ino, DT_DIR) < 0)
+       if (ctx->pos < 2) {
+               file->private_data = (void *)0;
+               if (!dir_emit_dots(file, ctx))
                        return 0;
-               filp->f_pos = f_pos = 1;
-               stored++;
-       }
-       if (f_pos == 1) {
-               if (filldir(dirent, "..", 2, f_pos, parent_ino(filp->f_path.dentry), DT_DIR) < 0)
-                       return stored;
-               filp->f_pos = f_pos = 2;
-               stored++;
        }
 
        affs_lock_dir(inode);
-       chain_pos = (f_pos - 2) & 0xffff;
-       hash_pos  = (f_pos - 2) >> 16;
+       chain_pos = (ctx->pos - 2) & 0xffff;
+       hash_pos  = (ctx->pos - 2) >> 16;
        if (chain_pos == 0xffff) {
                affs_warning(sb, "readdir", "More than 65535 entries in chain");
                chain_pos = 0;
                hash_pos++;
-               filp->f_pos = ((hash_pos << 16) | chain_pos) + 2;
+               ctx->pos = ((hash_pos << 16) | chain_pos) + 2;
        }
        dir_bh = affs_bread(sb, inode->i_ino);
        if (!dir_bh)
@@ -94,8 +77,8 @@ affs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        /* If the directory hasn't changed since the last call to readdir(),
         * we can jump directly to where we left off.
         */
-       ino = (u32)(long)filp->private_data;
-       if (ino && filp->f_version == inode->i_version) {
+       ino = (u32)(long)file->private_data;
+       if (ino && file->f_version == inode->i_version) {
                pr_debug("AFFS: readdir() left off=%d\n", ino);
                goto inside;
        }
@@ -105,7 +88,7 @@ affs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                fh_bh = affs_bread(sb, ino);
                if (!fh_bh) {
                        affs_error(sb, "readdir","Cannot read block %d", i);
-                       goto readdir_out;
+                       return -EIO;
                }
                ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain);
                affs_brelse(fh_bh);
@@ -119,38 +102,34 @@ affs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]);
                if (!ino)
                        continue;
-               f_pos = (hash_pos << 16) + 2;
+               ctx->pos = (hash_pos << 16) + 2;
 inside:
                do {
                        fh_bh = affs_bread(sb, ino);
                        if (!fh_bh) {
                                affs_error(sb, "readdir","Cannot read block %d", ino);
-                               goto readdir_done;
+                               break;
                        }
 
                        namelen = min(AFFS_TAIL(sb, fh_bh)->name[0], (u8)30);
                        name = AFFS_TAIL(sb, fh_bh)->name + 1;
                        pr_debug("AFFS: readdir(): filldir(\"%.*s\", ino=%u), hash=%d, f_pos=%x\n",
-                                namelen, name, ino, hash_pos, f_pos);
-                       if (filldir(dirent, name, namelen, f_pos, ino, DT_UNKNOWN) < 0)
+                                namelen, name, ino, hash_pos, (u32)ctx->pos);
+                       if (!dir_emit(ctx, name, namelen, ino, DT_UNKNOWN))
                                goto readdir_done;
-                       stored++;
-                       f_pos++;
+                       ctx->pos++;
                        ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain);
                        affs_brelse(fh_bh);
                        fh_bh = NULL;
                } while (ino);
        }
 readdir_done:
-       filp->f_pos = f_pos;
-       filp->f_version = inode->i_version;
-       filp->private_data = (void *)(long)ino;
-       res = stored;
+       file->f_version = inode->i_version;
+       file->private_data = (void *)(long)ino;
 
 readdir_out:
        affs_brelse(dir_bh);
        affs_brelse(fh_bh);
        affs_unlock_dir(inode);
-       pr_debug("AFFS: readdir()=%d\n", stored);
-       return res;
+       return 0;
 }
index 7a465ed04444dd4fc193eb7507961dcb013ad66b..34494fbead0ab892733afb344e002ab1c9a08994 100644 (file)
@@ -22,7 +22,7 @@
 static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
                                 unsigned int flags);
 static int afs_dir_open(struct inode *inode, struct file *file);
-static int afs_readdir(struct file *file, void *dirent, filldir_t filldir);
+static int afs_readdir(struct file *file, struct dir_context *ctx);
 static int afs_d_revalidate(struct dentry *dentry, unsigned int flags);
 static int afs_d_delete(const struct dentry *dentry);
 static void afs_d_release(struct dentry *dentry);
@@ -43,7 +43,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 const struct file_operations afs_dir_file_operations = {
        .open           = afs_dir_open,
        .release        = afs_release,
-       .readdir        = afs_readdir,
+       .iterate        = afs_readdir,
        .lock           = afs_lock,
        .llseek         = generic_file_llseek,
 };
@@ -119,9 +119,9 @@ struct afs_dir_page {
 };
 
 struct afs_lookup_cookie {
+       struct dir_context ctx;
        struct afs_fid  fid;
-       const char      *name;
-       size_t          nlen;
+       struct qstr name;
        int             found;
 };
 
@@ -228,20 +228,18 @@ static int afs_dir_open(struct inode *inode, struct file *file)
 /*
  * deal with one block in an AFS directory
  */
-static int afs_dir_iterate_block(unsigned *fpos,
+static int afs_dir_iterate_block(struct dir_context *ctx,
                                 union afs_dir_block *block,
-                                unsigned blkoff,
-                                void *cookie,
-                                filldir_t filldir)
+                                unsigned blkoff)
 {
        union afs_dirent *dire;
        unsigned offset, next, curr;
        size_t nlen;
-       int tmp, ret;
+       int tmp;
 
-       _enter("%u,%x,%p,,",*fpos,blkoff,block);
+       _enter("%u,%x,%p,,",(unsigned)ctx->pos,blkoff,block);
 
-       curr = (*fpos - blkoff) / sizeof(union afs_dirent);
+       curr = (ctx->pos - blkoff) / sizeof(union afs_dirent);
 
        /* walk through the block, an entry at a time */
        for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries;
@@ -256,7 +254,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
                        _debug("ENT[%Zu.%u]: unused",
                               blkoff / sizeof(union afs_dir_block), offset);
                        if (offset >= curr)
-                               *fpos = blkoff +
+                               ctx->pos = blkoff +
                                        next * sizeof(union afs_dirent);
                        continue;
                }
@@ -302,19 +300,15 @@ static int afs_dir_iterate_block(unsigned *fpos,
                        continue;
 
                /* found the next entry */
-               ret = filldir(cookie,
-                             dire->u.name,
-                             nlen,
-                             blkoff + offset * sizeof(union afs_dirent),
+               if (!dir_emit(ctx, dire->u.name, nlen,
                              ntohl(dire->u.vnode),
-                             filldir == afs_lookup_filldir ?
-                             ntohl(dire->u.unique) : DT_UNKNOWN);
-               if (ret < 0) {
+                             ctx->actor == afs_lookup_filldir ?
+                             ntohl(dire->u.unique) : DT_UNKNOWN)) {
                        _leave(" = 0 [full]");
                        return 0;
                }
 
-               *fpos = blkoff + next * sizeof(union afs_dirent);
+               ctx->pos = blkoff + next * sizeof(union afs_dirent);
        }
 
        _leave(" = 1 [more]");
@@ -324,8 +318,8 @@ static int afs_dir_iterate_block(unsigned *fpos,
 /*
  * iterate through the data blob that lists the contents of an AFS directory
  */
-static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
-                          filldir_t filldir, struct key *key)
+static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
+                          struct key *key)
 {
        union afs_dir_block *dblock;
        struct afs_dir_page *dbuf;
@@ -333,7 +327,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
        unsigned blkoff, limit;
        int ret;
 
-       _enter("{%lu},%u,,", dir->i_ino, *fpos);
+       _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos);
 
        if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
                _leave(" = -ESTALE");
@@ -341,13 +335,13 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
        }
 
        /* round the file position up to the next entry boundary */
-       *fpos += sizeof(union afs_dirent) - 1;
-       *fpos &= ~(sizeof(union afs_dirent) - 1);
+       ctx->pos += sizeof(union afs_dirent) - 1;
+       ctx->pos &= ~(sizeof(union afs_dirent) - 1);
 
        /* walk through the blocks in sequence */
        ret = 0;
-       while (*fpos < dir->i_size) {
-               blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1);
+       while (ctx->pos < dir->i_size) {
+               blkoff = ctx->pos & ~(sizeof(union afs_dir_block) - 1);
 
                /* fetch the appropriate page from the directory */
                page = afs_dir_get_page(dir, blkoff / PAGE_SIZE, key);
@@ -364,8 +358,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
                do {
                        dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) /
                                               sizeof(union afs_dir_block)];
-                       ret = afs_dir_iterate_block(fpos, dblock, blkoff,
-                                                   cookie, filldir);
+                       ret = afs_dir_iterate_block(ctx, dblock, blkoff);
                        if (ret != 1) {
                                afs_dir_put_page(page);
                                goto out;
@@ -373,7 +366,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
 
                        blkoff += sizeof(union afs_dir_block);
 
-               } while (*fpos < dir->i_size && blkoff < limit);
+               } while (ctx->pos < dir->i_size && blkoff < limit);
 
                afs_dir_put_page(page);
                ret = 0;
@@ -387,23 +380,10 @@ out:
 /*
  * read an AFS directory
  */
-static int afs_readdir(struct file *file, void *cookie, filldir_t filldir)
+static int afs_readdir(struct file *file, struct dir_context *ctx)
 {
-       unsigned fpos;
-       int ret;
-
-       _enter("{%Ld,{%lu}}",
-              file->f_pos, file_inode(file)->i_ino);
-
-       ASSERT(file->private_data != NULL);
-
-       fpos = file->f_pos;
-       ret = afs_dir_iterate(file_inode(file), &fpos,
-                             cookie, filldir, file->private_data);
-       file->f_pos = fpos;
-
-       _leave(" = %d", ret);
-       return ret;
+       return afs_dir_iterate(file_inode(file),
+                             ctx, file->private_data);
 }
 
 /*
@@ -416,15 +396,16 @@ static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
 {
        struct afs_lookup_cookie *cookie = _cookie;
 
-       _enter("{%s,%Zu},%s,%u,,%llu,%u",
-              cookie->name, cookie->nlen, name, nlen,
+       _enter("{%s,%u},%s,%u,,%llu,%u",
+              cookie->name.name, cookie->name.len, name, nlen,
               (unsigned long long) ino, dtype);
 
        /* insanity checks first */
        BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
        BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
 
-       if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) {
+       if (cookie->name.len != nlen ||
+           memcmp(cookie->name.name, name, nlen) != 0) {
                _leave(" = 0 [no]");
                return 0;
        }
@@ -444,24 +425,18 @@ static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
 static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
                         struct afs_fid *fid, struct key *key)
 {
-       struct afs_lookup_cookie cookie;
-       struct afs_super_info *as;
-       unsigned fpos;
+       struct afs_super_info *as = dir->i_sb->s_fs_info;
+       struct afs_lookup_cookie cookie = {
+               .ctx.actor = afs_lookup_filldir,
+               .name = dentry->d_name,
+               .fid.vid = as->volume->vid
+       };
        int ret;
 
        _enter("{%lu},%p{%s},", dir->i_ino, dentry, dentry->d_name.name);
 
-       as = dir->i_sb->s_fs_info;
-
        /* search the directory */
-       cookie.name     = dentry->d_name.name;
-       cookie.nlen     = dentry->d_name.len;
-       cookie.fid.vid  = as->volume->vid;
-       cookie.found    = 0;
-
-       fpos = 0;
-       ret = afs_dir_iterate(dir, &fpos, &cookie, afs_lookup_filldir,
-                             key);
+       ret = afs_dir_iterate(dir, &cookie.ctx, key);
        if (ret < 0) {
                _leave(" = %d [iter]", ret);
                return ret;
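
The afs changes above are representative of the whole series: ->readdir(file, dirent, filldir) becomes ->iterate(file, ctx), the position moves from file->f_pos into ctx->pos, and every filldir() call becomes a dir_emit() that returns false when the caller's buffer is full. A minimal sketch of the two pieces the conversions lean on, approximately as they appear in include/linux/fs.h for 3.11 (consult the tree for the authoritative definitions):

struct dir_context {
	const filldir_t actor;	/* callback fed one entry at a time */
	loff_t pos;		/* directory position, replaces file->f_pos */
};

static inline bool dir_emit(struct dir_context *ctx,
			    const char *name, int namelen,
			    u64 ino, unsigned type)
{
	/* the actor returns non-zero to stop; dir_emit turns that into
	 * "false, stop iterating" for the filesystem's loop */
	return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
}

Because dir_emit() hands the dir_context itself to the actor, afs_dir_iterate_block() can tell an internal lookup from an ordinary readdir by comparing ctx->actor against afs_lookup_filldir, and afs_do_lookup() only needs to embed a dir_context at the head of its cookie.
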
index 8f6e9234d5654a6b397500762048aff9da394baf..66d50fe2ee459a887511381e8e375db72d2bf1f3 100644 (file)
@@ -19,7 +19,8 @@
 #include "internal.h"
 
 static int afs_readpage(struct file *file, struct page *page);
-static void afs_invalidatepage(struct page *page, unsigned long offset);
+static void afs_invalidatepage(struct page *page, unsigned int offset,
+                              unsigned int length);
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 static int afs_launder_page(struct page *page);
 
@@ -310,16 +311,17 @@ static int afs_launder_page(struct page *page)
  * - release a page and clean up its private data if offset is 0 (indicating
  *   the entire page)
  */
-static void afs_invalidatepage(struct page *page, unsigned long offset)
+static void afs_invalidatepage(struct page *page, unsigned int offset,
+                              unsigned int length)
 {
        struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
 
-       _enter("{%lu},%lu", page->index, offset);
+       _enter("{%lu},%u,%u", page->index, offset, length);
 
        BUG_ON(!PageLocked(page));
 
        /* we clean up only if the entire page is being invalidated */
-       if (offset == 0) {
+       if (offset == 0 && length == PAGE_CACHE_SIZE) {
 #ifdef CONFIG_AFS_FSCACHE
                if (PageFsCache(page)) {
                        struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
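
Alongside the readdir work, ->invalidatepage() grows a length argument so callers can invalidate only part of a page. Filesystems like afs that tear down per-page private state only on a whole-page invalidation now test both values, as the hunk above does. A minimal sketch for a hypothetical filesystem (the names are illustrative, not taken from the patch):

static void example_invalidatepage(struct page *page, unsigned int offset,
				   unsigned int length)
{
	/* full-page invalidation: all private state can go */
	if (offset == 0 && length == PAGE_CACHE_SIZE) {
		example_drop_private(page);	/* hypothetical helper */
		return;
	}

	/*
	 * partial invalidation: only bytes [offset, offset + length) are
	 * going away, so any page-wide bookkeeping must be kept
	 */
}
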
index 085da86e07c22031f14b999d6caaf77063dfbde7..ca8e55548d9893eebf82e4261e79dc84c3b06409 100644 (file)
@@ -41,7 +41,7 @@ const struct file_operations autofs4_root_operations = {
        .open           = dcache_dir_open,
        .release        = dcache_dir_close,
        .read           = generic_read_dir,
-       .readdir        = dcache_readdir,
+       .iterate        = dcache_readdir,
        .llseek         = dcache_dir_lseek,
        .unlocked_ioctl = autofs4_root_ioctl,
 #ifdef CONFIG_COMPAT
@@ -53,7 +53,7 @@ const struct file_operations autofs4_dir_operations = {
        .open           = autofs4_dir_open,
        .release        = dcache_dir_close,
        .read           = generic_read_dir,
-       .readdir        = dcache_readdir,
+       .iterate        = dcache_readdir,
        .llseek         = dcache_dir_lseek,
 };
 
index 922ad460bff9857e39b6719001508e02e9b2b434..7c93953030fbe5eda13d76b6a8c53d6f2a31902d 100644 (file)
@@ -45,7 +45,7 @@ static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        return -EIO;
 }
 
-static int bad_file_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int bad_file_readdir(struct file *file, struct dir_context *ctx)
 {
        return -EIO;
 }
@@ -152,7 +152,7 @@ static const struct file_operations bad_file_ops =
        .write          = bad_file_write,
        .aio_read       = bad_file_aio_read,
        .aio_write      = bad_file_aio_write,
-       .readdir        = bad_file_readdir,
+       .iterate        = bad_file_readdir,
        .poll           = bad_file_poll,
        .unlocked_ioctl = bad_file_unlocked_ioctl,
        .compat_ioctl   = bad_file_compat_ioctl,
index f95dddced968f6f4509f1536b80462baa1c4393b..e9c75e20db32d43b550506f5f8f62b2656f760e7 100644 (file)
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL");
 /* The units the vfs expects inode->i_blocks to be in */
 #define VFS_BLOCK_SIZE 512
 
-static int befs_readdir(struct file *, void *, filldir_t);
+static int befs_readdir(struct file *, struct dir_context *);
 static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 static int befs_readpage(struct file *file, struct page *page);
 static sector_t befs_bmap(struct address_space *mapping, sector_t block);
@@ -66,7 +66,7 @@ static struct kmem_cache *befs_inode_cachep;
 
 static const struct file_operations befs_dir_operations = {
        .read           = generic_read_dir,
-       .readdir        = befs_readdir,
+       .iterate        = befs_readdir,
        .llseek         = generic_file_llseek,
 };
 
@@ -211,9 +211,9 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
 }
 
 static int
-befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+befs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
        befs_off_t value;
@@ -221,15 +221,14 @@ befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        size_t keysize;
        unsigned char d_type;
        char keybuf[BEFS_NAME_LEN + 1];
-       char *nlsname;
-       int nlsnamelen;
-       const char *dirname = filp->f_path.dentry->d_name.name;
+       const char *dirname = file->f_path.dentry->d_name.name;
 
        befs_debug(sb, "---> befs_readdir() "
-                  "name %s, inode %ld, filp->f_pos %Ld",
-                  dirname, inode->i_ino, filp->f_pos);
+                  "name %s, inode %ld, ctx->pos %Ld",
+                  dirname, inode->i_ino, ctx->pos);
 
-       result = befs_btree_read(sb, ds, filp->f_pos, BEFS_NAME_LEN + 1,
+more:
+       result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1,
                                 keybuf, &keysize, &value);
 
        if (result == BEFS_ERR) {
@@ -251,24 +250,29 @@ befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 
        /* Convert to NLS */
        if (BEFS_SB(sb)->nls) {
+               char *nlsname;
+               int nlsnamelen;
                result =
                    befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen);
                if (result < 0) {
                        befs_debug(sb, "<--- befs_readdir() ERROR");
                        return result;
                }
-               result = filldir(dirent, nlsname, nlsnamelen, filp->f_pos,
-                                (ino_t) value, d_type);
+               if (!dir_emit(ctx, nlsname, nlsnamelen,
+                                (ino_t) value, d_type)) {
+                       kfree(nlsname);
+                       return 0;
+               }
                kfree(nlsname);
-
        } else {
-               result = filldir(dirent, keybuf, keysize, filp->f_pos,
-                                (ino_t) value, d_type);
+               if (!dir_emit(ctx, keybuf, keysize,
+                                (ino_t) value, d_type))
+                       return 0;
        }
-       if (!result)
-               filp->f_pos++;
+       ctx->pos++;
+       goto more;
 
-       befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos);
+       befs_debug(sb, "<--- befs_readdir() pos %Ld", ctx->pos);
 
        return 0;
 }
index 3f422f6bb5caffa3e4c8c246d28a80ea06c468cb..a399e6d9dc74d1a1d2fb225af24685886cd5ec59 100644 (file)
@@ -26,58 +26,51 @@ static struct buffer_head *bfs_find_entry(struct inode *dir,
                                const unsigned char *name, int namelen,
                                struct bfs_dirent **res_dir);
 
-static int bfs_readdir(struct file *f, void *dirent, filldir_t filldir)
+static int bfs_readdir(struct file *f, struct dir_context *ctx)
 {
        struct inode *dir = file_inode(f);
        struct buffer_head *bh;
        struct bfs_dirent *de;
-       struct bfs_sb_info *info = BFS_SB(dir->i_sb);
        unsigned int offset;
        int block;
 
-       mutex_lock(&info->bfs_lock);
-
-       if (f->f_pos & (BFS_DIRENT_SIZE - 1)) {
+       if (ctx->pos & (BFS_DIRENT_SIZE - 1)) {
                printf("Bad f_pos=%08lx for %s:%08lx\n",
-                                       (unsigned long)f->f_pos,
+                                       (unsigned long)ctx->pos,
                                        dir->i_sb->s_id, dir->i_ino);
-               mutex_unlock(&info->bfs_lock);
-               return -EBADF;
+               return -EINVAL;
        }
 
-       while (f->f_pos < dir->i_size) {
-               offset = f->f_pos & (BFS_BSIZE - 1);
-               block = BFS_I(dir)->i_sblock + (f->f_pos >> BFS_BSIZE_BITS);
+       while (ctx->pos < dir->i_size) {
+               offset = ctx->pos & (BFS_BSIZE - 1);
+               block = BFS_I(dir)->i_sblock + (ctx->pos >> BFS_BSIZE_BITS);
                bh = sb_bread(dir->i_sb, block);
                if (!bh) {
-                       f->f_pos += BFS_BSIZE - offset;
+                       ctx->pos += BFS_BSIZE - offset;
                        continue;
                }
                do {
                        de = (struct bfs_dirent *)(bh->b_data + offset);
                        if (de->ino) {
                                int size = strnlen(de->name, BFS_NAMELEN);
-                               if (filldir(dirent, de->name, size, f->f_pos,
+                               if (!dir_emit(ctx, de->name, size,
                                                le16_to_cpu(de->ino),
-                                               DT_UNKNOWN) < 0) {
+                                               DT_UNKNOWN)) {
                                        brelse(bh);
-                                       mutex_unlock(&info->bfs_lock);
                                        return 0;
                                }
                        }
                        offset += BFS_DIRENT_SIZE;
-                       f->f_pos += BFS_DIRENT_SIZE;
-               } while ((offset < BFS_BSIZE) && (f->f_pos < dir->i_size));
+                       ctx->pos += BFS_DIRENT_SIZE;
+               } while ((offset < BFS_BSIZE) && (ctx->pos < dir->i_size));
                brelse(bh);
        }
-
-       mutex_unlock(&info->bfs_lock);
-       return 0;       
+       return 0;
 }
 
 const struct file_operations bfs_dir_operations = {
        .read           = generic_read_dir,
-       .readdir        = bfs_readdir,
+       .iterate        = bfs_readdir,
        .fsync          = generic_file_fsync,
        .llseek         = generic_file_llseek,
 };
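
bfs can drop its private bfs_lock and the explicit f_pos write-back because the new VFS entry point does both jobs for every filesystem: iterate_dir() takes i_mutex, seeds ctx->pos from file->f_pos, calls ->iterate(), and copies the position back. Roughly, from memory rather than a verbatim quote of fs/readdir.c:

int iterate_dir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	int res = -ENOTDIR;

	if (!file->f_op || !file->f_op->iterate)
		goto out;

	res = security_file_permission(file, MAY_READ);
	if (res)
		goto out;

	res = mutex_lock_killable(&inode->i_mutex);
	if (res)
		goto out;

	res = -ENOENT;
	if (!IS_DEADDIR(inode)) {
		ctx->pos = file->f_pos;			/* position in ... */
		res = file->f_op->iterate(file, ctx);
		file->f_pos = ctx->pos;			/* ... position out */
		file_accessed(file);
	}
	mutex_unlock(&inode->i_mutex);
out:
	return res;
}

The same fact explains the coda change further down: it no longer has to mirror f_pos between the coda file and the host file by hand.
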
index f26f38ccd1942bb8c27fddb05d25995c9a4e3e1b..eb34438ddedbc8ca0377fd6410d831fb3e824f7e 100644 (file)
@@ -1681,8 +1681,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
  * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
  *
  */
-int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
-                                   filldir_t filldir,
+int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
                                    struct list_head *ins_list)
 {
        struct btrfs_dir_item *di;
@@ -1704,13 +1703,13 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);
 
-               if (curr->key.offset < filp->f_pos) {
+               if (curr->key.offset < ctx->pos) {
                        if (atomic_dec_and_test(&curr->refs))
                                kfree(curr);
                        continue;
                }
 
-               filp->f_pos = curr->key.offset;
+               ctx->pos = curr->key.offset;
 
                di = (struct btrfs_dir_item *)curr->data;
                name = (char *)(di + 1);
@@ -1719,7 +1718,7 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
                d_type = btrfs_filetype_table[di->type];
                btrfs_disk_key_to_cpu(&location, &di->location);
 
-               over = filldir(dirent, name, name_len, curr->key.offset,
+               over = !dir_emit(ctx, name, name_len,
                               location.objectid, d_type);
 
                if (atomic_dec_and_test(&curr->refs))
index 1d5c5f7abe3e01bf872913d123f5c43b2797fd2b..a4b38f934d1471c3518c6d1e8e6f887adfdb7a70 100644 (file)
@@ -139,8 +139,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
                             struct list_head *del_list);
 int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index);
-int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
-                                   filldir_t filldir,
+int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
                                    struct list_head *ins_list);
 
 /* for init */
index b8b60b660c8f833cb38bf93823e1a3738e32669a..b0292b3ead54d1651ba47d7e9efcc567566dd1ed 100644 (file)
@@ -1013,7 +1013,8 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
        return try_release_extent_buffer(page);
 }
 
-static void btree_invalidatepage(struct page *page, unsigned long offset)
+static void btree_invalidatepage(struct page *page, unsigned int offset,
+                                unsigned int length)
 {
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
index e7e7afb4a87268211e8b0ef881a6eeac0068eefd..6bca9472f313cda2cb7ad1f230dda69bf4b1e8a9 100644 (file)
@@ -2957,7 +2957,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
        if (page->index > end_index ||
           (page->index == end_index && !pg_offset)) {
-               page->mapping->a_ops->invalidatepage(page, 0);
+               page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
                unlock_page(page);
                return 0;
        }
index 17f3064b4a3ebf7b65188be58b66713670f00ca9..4f9d16b70d3d87da9dd6e3cae926dbaaf4fa3345 100644 (file)
@@ -5137,10 +5137,9 @@ unsigned char btrfs_filetype_table[] = {
        DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
 };
 
-static int btrfs_real_readdir(struct file *filp, void *dirent,
-                             filldir_t filldir)
+static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_item *item;
        struct btrfs_dir_item *di;
@@ -5161,29 +5160,15 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        char tmp_name[32];
        char *name_ptr;
        int name_len;
-       int is_curr = 0;        /* filp->f_pos points to the current index? */
+       int is_curr = 0;        /* ctx->pos points to the current index? */
 
        /* FIXME, use a real flag for deciding about the key type */
        if (root->fs_info->tree_root == root)
                key_type = BTRFS_DIR_ITEM_KEY;
 
-       /* special case for "." */
-       if (filp->f_pos == 0) {
-               over = filldir(dirent, ".", 1,
-                              filp->f_pos, btrfs_ino(inode), DT_DIR);
-               if (over)
-                       return 0;
-               filp->f_pos = 1;
-       }
-       /* special case for .., just use the back ref */
-       if (filp->f_pos == 1) {
-               u64 pino = parent_ino(filp->f_path.dentry);
-               over = filldir(dirent, "..", 2,
-                              filp->f_pos, pino, DT_DIR);
-               if (over)
-                       return 0;
-               filp->f_pos = 2;
-       }
+       if (!dir_emit_dots(file, ctx))
+               return 0;
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -5197,7 +5182,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        }
 
        btrfs_set_key_type(&key, key_type);
-       key.offset = filp->f_pos;
+       key.offset = ctx->pos;
        key.objectid = btrfs_ino(inode);
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -5223,14 +5208,14 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
                        break;
                if (btrfs_key_type(&found_key) != key_type)
                        break;
-               if (found_key.offset < filp->f_pos)
+               if (found_key.offset < ctx->pos)
                        goto next;
                if (key_type == BTRFS_DIR_INDEX_KEY &&
                    btrfs_should_delete_dir_index(&del_list,
                                                  found_key.offset))
                        goto next;
 
-               filp->f_pos = found_key.offset;
+               ctx->pos = found_key.offset;
                is_curr = 1;
 
                di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
@@ -5274,9 +5259,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
                                over = 0;
                                goto skip;
                        }
-                       over = filldir(dirent, name_ptr, name_len,
-                                      found_key.offset, location.objectid,
-                                      d_type);
+                       over = !dir_emit(ctx, name_ptr, name_len,
+                                      location.objectid, d_type);
 
 skip:
                        if (name_ptr != tmp_name)
@@ -5295,9 +5279,8 @@ next:
 
        if (key_type == BTRFS_DIR_INDEX_KEY) {
                if (is_curr)
-                       filp->f_pos++;
-               ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
-                                                     &ins_list);
+                       ctx->pos++;
+               ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
                if (ret)
                        goto nopos;
        }
@@ -5308,9 +5291,9 @@ next:
                 * 32-bit glibc will use getdents64, but then strtol -
                 * so the last number we can serve is this.
                 */
-               filp->f_pos = 0x7fffffff;
+               ctx->pos = 0x7fffffff;
        else
-               filp->f_pos++;
+               ctx->pos++;
 nopos:
        ret = 0;
 err:
@@ -7510,7 +7493,8 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
        return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
 }
 
-static void btrfs_invalidatepage(struct page *page, unsigned long offset)
+static void btrfs_invalidatepage(struct page *page, unsigned int offset,
+                                unsigned int length)
 {
        struct inode *inode = page->mapping->host;
        struct extent_io_tree *tree;
@@ -8731,7 +8715,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
 static const struct file_operations btrfs_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = btrfs_real_readdir,
+       .iterate        = btrfs_real_readdir,
        .unlocked_ioctl = btrfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_ioctl,
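
The special cases for "." and ".." that btrfs_real_readdir() used to open-code are replaced by a single dir_emit_dots() call. Its shape is roughly the following, paraphrased from the include/linux/fs.h helpers rather than quoted verbatim:

static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
	if (ctx->pos == 0) {
		if (!dir_emit(ctx, ".", 1,
			      file->f_path.dentry->d_inode->i_ino, DT_DIR))
			return false;
		ctx->pos = 1;
	}
	if (ctx->pos == 1) {
		if (!dir_emit(ctx, "..", 2,
			      parent_ino(file->f_path.dentry), DT_DIR))
			return false;
		ctx->pos = 2;
	}
	return true;	/* both dots delivered (or already past them) */
}

When it returns false the user buffer is already full, so the filesystem simply returns 0, as the btrfs, cifs and coda conversions do.
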
index d2a4d1bb2d57aec3999e494d52c4f765a0ae48e8..f93392e2df126fd5c17833b7105338fc6f65be5f 100644 (file)
@@ -1454,7 +1454,8 @@ static void discard_buffer(struct buffer_head * bh)
  * block_invalidatepage - invalidate part or all of a buffer-backed page
  *
  * @page: the page which is affected
- * @offset: the index of the truncation point
+ * @offset: start of the range to invalidate
+ * @length: length of the range to invalidate
  *
  * block_invalidatepage() is called when all or part of the page has become
  * invalidated by a truncate operation.
@@ -1465,21 +1466,34 @@ static void discard_buffer(struct buffer_head * bh)
  * point.  Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-void block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned int offset,
+                         unsigned int length)
 {
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
+       unsigned int stop = length + offset;
 
        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                goto out;
 
+       /*
+        * Check for overflow
+        */
+       BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;
 
+               /*
+                * Are we still fully in range ?
+                */
+               if (next_off > stop)
+                       goto out;
+
                /*
                 * is this block fully invalidated?
                 */
@@ -1501,6 +1515,7 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
+
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
@@ -2841,7 +2856,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
                 * they may have been added in ext3_writepage().  Make them
                 * freeable here, so the page does not leak.
                 */
-               do_invalidatepage(page, 0);
+               do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
                unlock_page(page);
                return 0; /* don't care */
        }
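
block_invalidatepage() is where the new length argument pays off: a caller that is only punching out part of a page (a truncate to a non-page-aligned size, for instance) can now say so, and buffers past offset + length are left alone. An illustrative caller, not code from this patch:

/*
 * Illustrative only: invalidate the tail of the page that straddles the
 * new EOF after a truncate to 'newsize'.
 */
static void example_invalidate_partial_tail(struct page *page, loff_t newsize)
{
	unsigned int partial = newsize & (PAGE_CACHE_SIZE - 1);

	if (partial && page_has_private(page))
		do_invalidatepage(page, partial, PAGE_CACHE_SIZE - partial);
}
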
index 746ce532e130ac3f49b7a31096b95fda6ef1d18e..d4c1206af9fca6009a7591a8d36b36684043bf52 100644 (file)
@@ -13,8 +13,6 @@
 #include <linux/mount.h>
 #include "internal.h"
 
-#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
-
 struct cachefiles_lookup_data {
        struct cachefiles_xattr *auxdata;       /* auxiliary data */
        char                    *key;           /* key path */
@@ -212,20 +210,29 @@ static void cachefiles_update_object(struct fscache_object *_object)
        object = container_of(_object, struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache, struct cachefiles_cache,
                             cache);
+
+       if (!fscache_use_cookie(_object)) {
+               _leave(" [relinq]");
+               return;
+       }
+
        cookie = object->fscache.cookie;
 
        if (!cookie->def->get_aux) {
+               fscache_unuse_cookie(_object);
                _leave(" [no aux]");
                return;
        }
 
        auxdata = kmalloc(2 + 512 + 3, cachefiles_gfp);
        if (!auxdata) {
+               fscache_unuse_cookie(_object);
                _leave(" [nomem]");
                return;
        }
 
        auxlen = cookie->def->get_aux(cookie->netfs_data, auxdata->data, 511);
+       fscache_unuse_cookie(_object);
        ASSERTCMP(auxlen, <, 511);
 
        auxdata->len = auxlen + 1;
@@ -263,7 +270,7 @@ static void cachefiles_drop_object(struct fscache_object *_object)
 #endif
 
        /* delete retired objects */
-       if (object->fscache.state == FSCACHE_OBJECT_RECYCLING &&
+       if (test_bit(FSCACHE_COOKIE_RETIRED, &object->fscache.cookie->flags) &&
            _object != cache->cache.fsdef
            ) {
                _debug("- retire object OBJ%x", object->fscache.debug_id);
index 8c01c5fcdf75c32bab21a6265599ba2a02cdee83..25badd1aec5c677215f20d9bd970261deab2dd35 100644 (file)
@@ -38,7 +38,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
        printk(KERN_ERR "%sobject: OBJ%x\n",
               prefix, object->fscache.debug_id);
        printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
-              prefix, fscache_object_states[object->fscache.state],
+              prefix, object->fscache.state->name,
               object->fscache.flags, work_busy(&object->fscache.work),
               object->fscache.events, object->fscache.event_mask);
        printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
@@ -127,10 +127,10 @@ static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
 found_dentry:
        kdebug("preemptive burial: OBJ%x [%s] %p",
               object->fscache.debug_id,
-              fscache_object_states[object->fscache.state],
+              object->fscache.state->name,
               dentry);
 
-       if (object->fscache.state < FSCACHE_OBJECT_DYING) {
+       if (fscache_object_is_live(&object->fscache)) {
                printk(KERN_ERR "\n");
                printk(KERN_ERR "CacheFiles: Error:"
                       " Can't preemptively bury live object\n");
@@ -192,7 +192,7 @@ try_again:
        /* an old object from a previous incarnation is hogging the slot - we
         * need to wait for it to be destroyed */
 wait_for_old_object:
-       if (xobject->fscache.state < FSCACHE_OBJECT_DYING) {
+       if (fscache_object_is_live(&xobject->fscache)) {
                printk(KERN_ERR "\n");
                printk(KERN_ERR "CacheFiles: Error:"
                       " Unexpected object collision\n");
@@ -836,7 +836,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
        //       dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);
 
        /* look up the victim */
-       mutex_lock_nested(&dir->d_inode->i_mutex, 1);
+       mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
 
        start = jiffies;
        victim = lookup_one_len(filename, dir, strlen(filename));
index 73b46288b54b839c523289a5471a376f14097e9c..2476e5162609ffc4db6be49549e27999e288f06f 100644 (file)
@@ -109,13 +109,12 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object,
        struct dentry *dentry = object->dentry;
        int ret;
 
-       ASSERT(object->fscache.cookie);
        ASSERT(dentry);
 
        _enter("%p,#%d", object, auxdata->len);
 
        /* attempt to install the cache metadata directly */
-       _debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
+       _debug("SET #%u", auxdata->len);
 
        ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
                           &auxdata->type, auxdata->len,
@@ -138,13 +137,12 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
        struct dentry *dentry = object->dentry;
        int ret;
 
-       ASSERT(object->fscache.cookie);
        ASSERT(dentry);
 
        _enter("%p,#%d", object, auxdata->len);
 
        /* attempt to install the cache metadata directly */
-       _debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
+       _debug("SET #%u", auxdata->len);
 
        ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
                           &auxdata->type, auxdata->len,
index 3e68ac1010407b23617cf1f6e019834d2eee1670..38b5c1bc6776ceeeab66b77bb683c3acb23716e0 100644 (file)
@@ -143,7 +143,8 @@ static int ceph_set_page_dirty(struct page *page)
  * dirty page counters appropriately.  Only called if there is private
  * data on the page.
  */
-static void ceph_invalidatepage(struct page *page, unsigned long offset)
+static void ceph_invalidatepage(struct page *page, unsigned int offset,
+                               unsigned int length)
 {
        struct inode *inode;
        struct ceph_inode_info *ci;
@@ -163,20 +164,20 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset)
        if (!PageDirty(page))
                pr_err("%p invalidatepage %p page not dirty\n", inode, page);
 
-       if (offset == 0)
+       if (offset == 0 && length == PAGE_CACHE_SIZE)
                ClearPageChecked(page);
 
        ci = ceph_inode(inode);
-       if (offset == 0) {
-               dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
-                    inode, page, page->index, offset);
+       if (offset == 0 && length == PAGE_CACHE_SIZE) {
+               dout("%p invalidatepage %p idx %lu full dirty page\n",
+                    inode, page, page->index);
                ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
                ceph_put_snap_context(snapc);
                page->private = 0;
                ClearPagePrivate(page);
        } else {
-               dout("%p invalidatepage %p idx %lu partial dirty page\n",
-                    inode, page, page->index);
+               dout("%p invalidatepage %p idx %lu partial dirty page %u(%u)\n",
+                    inode, page, page->index, offset, length);
        }
 }
 
index f02d82b7933e4399abd235a2527403df3364a2fa..a40ceda47a3218ee53c2167d8844899c5de3e9cf 100644 (file)
@@ -111,11 +111,10 @@ static unsigned fpos_off(loff_t p)
  * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
  * the MDS if/when the directory is modified).
  */
-static int __dcache_readdir(struct file *filp,
-                           void *dirent, filldir_t filldir)
+static int __dcache_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct ceph_file_info *fi = filp->private_data;
-       struct dentry *parent = filp->f_dentry;
+       struct ceph_file_info *fi = file->private_data;
+       struct dentry *parent = file->f_dentry;
        struct inode *dir = parent->d_inode;
        struct list_head *p;
        struct dentry *dentry, *last;
@@ -126,14 +125,14 @@ static int __dcache_readdir(struct file *filp,
        last = fi->dentry;
        fi->dentry = NULL;
 
-       dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
+       dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos,
             last);
 
        spin_lock(&parent->d_lock);
 
        /* start at beginning? */
-       if (filp->f_pos == 2 || last == NULL ||
-           filp->f_pos < ceph_dentry(last)->offset) {
+       if (ctx->pos == 2 || last == NULL ||
+           ctx->pos < ceph_dentry(last)->offset) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
@@ -157,11 +156,11 @@ more:
                if (!d_unhashed(dentry) && dentry->d_inode &&
                    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
                    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
-                   filp->f_pos <= di->offset)
+                   ctx->pos <= di->offset)
                        break;
                dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, di->offset,
-                    filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
+                    ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
                     !dentry->d_inode ? " null" : "");
                spin_unlock(&dentry->d_lock);
                p = p->prev;
@@ -173,29 +172,27 @@ more:
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
 
-       dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
+       dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
-       filp->f_pos = di->offset;
-       err = filldir(dirent, dentry->d_name.name,
-                     dentry->d_name.len, di->offset,
+       ctx->pos = di->offset;
+       if (!dir_emit(ctx, dentry->d_name.name,
+                     dentry->d_name.len,
                      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
-                     dentry->d_inode->i_mode >> 12);
-
-       if (last) {
-               if (err < 0) {
+                     dentry->d_inode->i_mode >> 12)) {
+               if (last) {
                        /* remember our position */
                        fi->dentry = last;
                        fi->next_offset = di->offset;
-               } else {
-                       dput(last);
                }
+               dput(dentry);
+               return 0;
        }
-       last = dentry;
 
-       if (err < 0)
-               goto out;
+       if (last)
+               dput(last);
+       last = dentry;
 
-       filp->f_pos++;
+       ctx->pos++;
 
        /* make sure a dentry wasn't dropped while we didn't have parent lock */
        if (!ceph_dir_is_complete(dir)) {
@@ -235,59 +232,59 @@ static int note_last_dentry(struct ceph_file_info *fi, const char *name,
        return 0;
 }
 
-static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int ceph_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct ceph_file_info *fi = filp->private_data;
-       struct inode *inode = file_inode(filp);
+       struct ceph_file_info *fi = file->private_data;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
-       unsigned frag = fpos_frag(filp->f_pos);
-       int off = fpos_off(filp->f_pos);
+       unsigned frag = fpos_frag(ctx->pos);
+       int off = fpos_off(ctx->pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
        const int max_entries = fsc->mount_options->max_readdir;
        const int max_bytes = fsc->mount_options->max_readdir_bytes;
 
-       dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
+       dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
        if (fi->flags & CEPH_F_ATEND)
                return 0;
 
        /* always start with . and .. */
-       if (filp->f_pos == 0) {
+       if (ctx->pos == 0) {
                /* note dir version at start of readdir so we can tell
                 * if any dentries get dropped */
                fi->dir_release_count = atomic_read(&ci->i_release_count);
 
                dout("readdir off 0 -> '.'\n");
-               if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
+               if (!dir_emit(ctx, ".", 1, 
                            ceph_translate_ino(inode->i_sb, inode->i_ino),
-                           inode->i_mode >> 12) < 0)
+                           inode->i_mode >> 12))
                        return 0;
-               filp->f_pos = 1;
+               ctx->pos = 1;
                off = 1;
        }
-       if (filp->f_pos == 1) {
-               ino_t ino = parent_ino(filp->f_dentry);
+       if (ctx->pos == 1) {
+               ino_t ino = parent_ino(file->f_dentry);
                dout("readdir off 1 -> '..'\n");
-               if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
+               if (!dir_emit(ctx, "..", 2,
                            ceph_translate_ino(inode->i_sb, ino),
-                           inode->i_mode >> 12) < 0)
+                           inode->i_mode >> 12))
                        return 0;
-               filp->f_pos = 2;
+               ctx->pos = 2;
                off = 2;
        }
 
        /* can we use the dcache? */
        spin_lock(&ci->i_ceph_lock);
-       if ((filp->f_pos == 2 || fi->dentry) &&
+       if ((ctx->pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            __ceph_dir_is_complete(ci) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                spin_unlock(&ci->i_ceph_lock);
-               err = __dcache_readdir(filp, dirent, filldir);
+               err = __dcache_readdir(file, ctx);
                if (err != -EAGAIN)
                        return err;
        } else {
@@ -327,7 +324,7 @@ more:
                        return PTR_ERR(req);
                req->r_inode = inode;
                ihold(inode);
-               req->r_dentry = dget(filp->f_dentry);
+               req->r_dentry = dget(file->f_dentry);
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
                req->r_direct_hash = ceph_frag_value(frag);
@@ -379,15 +376,16 @@ more:
        rinfo = &fi->last_readdir->r_reply_info;
        dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
             rinfo->dir_nr, off, fi->offset);
+
+       ctx->pos = ceph_make_fpos(frag, off);
        while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
-               u64 pos = ceph_make_fpos(frag, off);
                struct ceph_mds_reply_inode *in =
                        rinfo->dir_in[off - fi->offset].in;
                struct ceph_vino vino;
                ino_t ino;
 
                dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
-                    off, off - fi->offset, rinfo->dir_nr, pos,
+                    off, off - fi->offset, rinfo->dir_nr, ctx->pos,
                     rinfo->dir_dname_len[off - fi->offset],
                     rinfo->dir_dname[off - fi->offset], in);
                BUG_ON(!in);
@@ -395,16 +393,15 @@ more:
                vino.ino = le64_to_cpu(in->ino);
                vino.snap = le64_to_cpu(in->snapid);
                ino = ceph_vino_to_ino(vino);
-               if (filldir(dirent,
+               if (!dir_emit(ctx,
                            rinfo->dir_dname[off - fi->offset],
                            rinfo->dir_dname_len[off - fi->offset],
-                           pos,
-                           ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
+                           ceph_translate_ino(inode->i_sb, ino), ftype)) {
                        dout("filldir stopping us...\n");
                        return 0;
                }
                off++;
-               filp->f_pos = pos + 1;
+               ctx->pos++;
        }
 
        if (fi->last_name) {
@@ -417,7 +414,7 @@ more:
        if (!ceph_frag_is_rightmost(frag)) {
                frag = ceph_frag_next(frag);
                off = 0;
-               filp->f_pos = ceph_make_fpos(frag, off);
+               ctx->pos = ceph_make_fpos(frag, off);
                dout("readdir next frag is %x\n", frag);
                goto more;
        }
@@ -432,11 +429,11 @@ more:
        if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                __ceph_dir_set_complete(ci, fi->dir_release_count);
-               ci->i_max_offset = filp->f_pos;
+               ci->i_max_offset = ctx->pos;
        }
        spin_unlock(&ci->i_ceph_lock);
 
-       dout("readdir %p filp %p done.\n", inode, filp);
+       dout("readdir %p file %p done.\n", inode, file);
        return 0;
 }
 
@@ -1268,7 +1265,7 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 
 const struct file_operations ceph_dir_fops = {
        .read = ceph_read_dir,
-       .readdir = ceph_readdir,
+       .iterate = ceph_readdir,
        .llseek = ceph_dir_llseek,
        .open = ceph_open,
        .release = ceph_release,
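
ceph packs a directory fragment and an offset within that fragment into the single loff_t position, which is why the conversion sets ctx->pos with ceph_make_fpos() and resets the low half when it moves to the next fragment. The real helpers are fpos_frag()/fpos_off() in fs/ceph; the split below assumes the fragment lives in the high 32 bits, an assumption made purely for illustration:

/* Assumed layout, for illustration only; see fs/ceph for the real helpers. */
static inline loff_t example_make_fpos(unsigned frag, unsigned off)
{
	return ((loff_t)frag << 32) | (loff_t)off;
}

static inline unsigned example_fpos_frag(loff_t pos)
{
	return pos >> 32;
}

static inline unsigned example_fpos_off(loff_t pos)
{
	return pos & 0xffffffff;
}
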
index 3752b9f6d9e46e90876b18f4d14527f9262baff6..540c1ccfcdb28a310614f24b873f81f4c4e652d3 100644 (file)
@@ -968,7 +968,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
 };
 
 const struct file_operations cifs_dir_ops = {
-       .readdir = cifs_readdir,
+       .iterate = cifs_readdir,
        .release = cifs_closedir,
        .read    = generic_read_dir,
        .unlocked_ioctl  = cifs_ioctl,
index 0e32c3446ce9330b21898c59e72bd5430c47e48b..d05b3028e3b96ef7c99eaa202d10bc22630489e7 100644 (file)
@@ -101,7 +101,7 @@ extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
 extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
 extern const struct file_operations cifs_dir_ops;
 extern int cifs_dir_open(struct inode *inode, struct file *file);
-extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
+extern int cifs_readdir(struct file *file, struct dir_context *ctx);
 
 /* Functions related to dir entries */
 extern const struct dentry_operations cifs_dentry_ops;
index 48b29d24c9f4d58e225d060b70da4fc2e5ea5e5e..4d8ba8d491e5d0be27bfbf8506175f7ffd8ce476 100644 (file)
@@ -3546,11 +3546,12 @@ static int cifs_release_page(struct page *page, gfp_t gfp)
        return cifs_fscache_release_page(page, gfp);
 }
 
-static void cifs_invalidate_page(struct page *page, unsigned long offset)
+static void cifs_invalidate_page(struct page *page, unsigned int offset,
+                                unsigned int length)
 {
        struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
 
-       if (offset == 0)
+       if (offset == 0 && length == PAGE_CACHE_SIZE)
                cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
 }
 
index 770d5a9781c1ccc0a67d033aafd66d3e4473f929..f1213799de1a2603b1f8f1c8d42f8b0fb2cb5a64 100644 (file)
@@ -537,14 +537,14 @@ static int cifs_save_resume_key(const char *current_entry,
  * every entry (do not increment for . or .. entry).
  */
 static int
-find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon,
+find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
                struct file *file, char **current_entry, int *num_to_ret)
 {
        __u16 search_flags;
        int rc = 0;
        int pos_in_buf = 0;
        loff_t first_entry_in_buffer;
-       loff_t index_to_find = file->f_pos;
+       loff_t index_to_find = pos;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        struct TCP_Server_Info *server = tcon->ses->server;
@@ -659,8 +659,9 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon,
        return rc;
 }
 
-static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir,
-               void *dirent, char *scratch_buf, unsigned int max_len)
+static int cifs_filldir(char *find_entry, struct file *file,
+               struct dir_context *ctx,
+               char *scratch_buf, unsigned int max_len)
 {
        struct cifsFileInfo *file_info = file->private_data;
        struct super_block *sb = file->f_path.dentry->d_sb;
@@ -740,13 +741,11 @@ static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir,
        cifs_prime_dcache(file->f_dentry, &name, &fattr);
 
        ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
-       rc = filldir(dirent, name.name, name.len, file->f_pos, ino,
-                    fattr.cf_dtype);
-       return rc;
+       return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
 }
 
 
-int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
+int cifs_readdir(struct file *file, struct dir_context *ctx)
 {
        int rc = 0;
        unsigned int xid;
@@ -772,103 +771,86 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
                        goto rddir2_exit;
        }
 
-       switch ((int) file->f_pos) {
-       case 0:
-               if (filldir(direntry, ".", 1, file->f_pos,
-                    file_inode(file)->i_ino, DT_DIR) < 0) {
-                       cifs_dbg(VFS, "Filldir for current dir failed\n");
-                       rc = -ENOMEM;
-                       break;
-               }
-               file->f_pos++;
-       case 1:
-               if (filldir(direntry, "..", 2, file->f_pos,
-                    parent_ino(file->f_path.dentry), DT_DIR) < 0) {
-                       cifs_dbg(VFS, "Filldir for parent dir failed\n");
-                       rc = -ENOMEM;
-                       break;
-               }
-               file->f_pos++;
-       default:
-               /* 1) If search is active,
-                       is in current search buffer?
-                       if it before then restart search
-                       if after then keep searching till find it */
-
-               if (file->private_data == NULL) {
-                       rc = -EINVAL;
-                       free_xid(xid);
-                       return rc;
-               }
-               cifsFile = file->private_data;
-               if (cifsFile->srch_inf.endOfSearch) {
-                       if (cifsFile->srch_inf.emptyDir) {
-                               cifs_dbg(FYI, "End of search, empty dir\n");
-                               rc = 0;
-                               break;
-                       }
-               } /* else {
-                       cifsFile->invalidHandle = true;
-                       tcon->ses->server->close(xid, tcon, &cifsFile->fid);
-               } */
+       if (!dir_emit_dots(file, ctx))
+               goto rddir2_exit;
 
-               tcon = tlink_tcon(cifsFile->tlink);
-               rc = find_cifs_entry(xid, tcon, file, &current_entry,
-                                    &num_to_fill);
-               if (rc) {
-                       cifs_dbg(FYI, "fce error %d\n", rc);
-                       goto rddir2_exit;
-               } else if (current_entry != NULL) {
-                       cifs_dbg(FYI, "entry %lld found\n", file->f_pos);
-               } else {
-                       cifs_dbg(FYI, "could not find entry\n");
+       /* 1) If search is active,
+               is in current search buffer?
+               if it before then restart search
+               if after then keep searching till find it */
+
+       if (file->private_data == NULL) {
+               rc = -EINVAL;
+               goto rddir2_exit;
+       }
+       cifsFile = file->private_data;
+       if (cifsFile->srch_inf.endOfSearch) {
+               if (cifsFile->srch_inf.emptyDir) {
+                       cifs_dbg(FYI, "End of search, empty dir\n");
+                       rc = 0;
                        goto rddir2_exit;
                }
-               cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
-                        num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
-               max_len = tcon->ses->server->ops->calc_smb_size(
-                               cifsFile->srch_inf.ntwrk_buf_start);
-               end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
-
-               tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
-               if (tmp_buf == NULL) {
-                       rc = -ENOMEM;
+       } /* else {
+               cifsFile->invalidHandle = true;
+               tcon->ses->server->close(xid, tcon, &cifsFile->fid);
+       } */
+
+       tcon = tlink_tcon(cifsFile->tlink);
+       rc = find_cifs_entry(xid, tcon, ctx->pos, file, &current_entry,
+                            &num_to_fill);
+       if (rc) {
+               cifs_dbg(FYI, "fce error %d\n", rc);
+               goto rddir2_exit;
+       } else if (current_entry != NULL) {
+               cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
+       } else {
+               cifs_dbg(FYI, "could not find entry\n");
+               goto rddir2_exit;
+       }
+       cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
+                num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
+       max_len = tcon->ses->server->ops->calc_smb_size(
+                       cifsFile->srch_inf.ntwrk_buf_start);
+       end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
+
+       tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
+       if (tmp_buf == NULL) {
+               rc = -ENOMEM;
+               goto rddir2_exit;
+       }
+
+       for (i = 0; i < num_to_fill; i++) {
+               if (current_entry == NULL) {
+                       /* evaluate whether this case is an error */
+                       cifs_dbg(VFS, "past SMB end,  num to fill %d i %d\n",
+                                num_to_fill, i);
                        break;
                }
-
-               for (i = 0; (i < num_to_fill) && (rc == 0); i++) {
-                       if (current_entry == NULL) {
-                               /* evaluate whether this case is an error */
-                               cifs_dbg(VFS, "past SMB end,  num to fill %d i %d\n",
-                                        num_to_fill, i);
-                               break;
-                       }
-                       /*
-                        * if buggy server returns . and .. late do we want to
-                        * check for that here?
-                        */
-                       rc = cifs_filldir(current_entry, file, filldir,
-                                         direntry, tmp_buf, max_len);
-                       if (rc == -EOVERFLOW) {
+               /*
+                * if buggy server returns . and .. late do we want to
+                * check for that here?
+                */
+               rc = cifs_filldir(current_entry, file, ctx,
+                                 tmp_buf, max_len);
+               if (rc) {
+                       if (rc > 0)
                                rc = 0;
-                               break;
-                       }
-
-                       file->f_pos++;
-                       if (file->f_pos ==
-                               cifsFile->srch_inf.index_of_last_entry) {
-                               cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
-                                        file->f_pos, tmp_buf);
-                               cifs_save_resume_key(current_entry, cifsFile);
-                               break;
-                       } else
-                               current_entry =
-                                       nxt_dir_entry(current_entry, end_of_smb,
-                                               cifsFile->srch_inf.info_level);
+                       break;
                }
-               kfree(tmp_buf);
-               break;
-       } /* end switch */
+
+               ctx->pos++;
+               if (ctx->pos ==
+                       cifsFile->srch_inf.index_of_last_entry) {
+                       cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
+                                ctx->pos, tmp_buf);
+                       cifs_save_resume_key(current_entry, cifsFile);
+                       break;
+               } else
+                       current_entry =
+                               nxt_dir_entry(current_entry, end_of_smb,
+                                       cifsFile->srch_inf.info_level);
+       }
+       kfree(tmp_buf);
 
 rddir2_exit:
        free_xid(xid);
index b7d3a05c062c0517bdcee3aedf01619f60bddc7a..87e0ee9f4465e5b0bfc7bde48657314be18258b3 100644 (file)
@@ -43,15 +43,14 @@ static int coda_rename(struct inode *old_inode, struct dentry *old_dentry,
                        struct inode *new_inode, struct dentry *new_dentry);
 
 /* dir file-ops */
-static int coda_readdir(struct file *file, void *buf, filldir_t filldir);
+static int coda_readdir(struct file *file, struct dir_context *ctx);
 
 /* dentry ops */
 static int coda_dentry_revalidate(struct dentry *de, unsigned int flags);
 static int coda_dentry_delete(const struct dentry *);
 
 /* support routines */
-static int coda_venus_readdir(struct file *coda_file, void *buf,
-                             filldir_t filldir);
+static int coda_venus_readdir(struct file *, struct dir_context *);
 
 /* same as fs/bad_inode.c */
 static int coda_return_EIO(void)
@@ -85,7 +84,7 @@ const struct inode_operations coda_dir_inode_operations =
 const struct file_operations coda_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = coda_readdir,
+       .iterate        = coda_readdir,
        .open           = coda_open,
        .release        = coda_release,
        .fsync          = coda_fsync,
@@ -378,7 +377,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 
 /* file operations for directories */
-static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
+static int coda_readdir(struct file *coda_file, struct dir_context *ctx)
 {
        struct coda_file_info *cfi;
        struct file *host_file;
@@ -391,30 +390,19 @@ static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
        if (!host_file->f_op)
                return -ENOTDIR;
 
-       if (host_file->f_op->readdir)
-       {
-               /* potemkin case: we were handed a directory inode.
-                * We can't use vfs_readdir because we have to keep the file
-                * position in sync between the coda_file and the host_file.
-                * and as such we need grab the inode mutex. */
+       if (host_file->f_op->iterate) {
                struct inode *host_inode = file_inode(host_file);
-
                mutex_lock(&host_inode->i_mutex);
-               host_file->f_pos = coda_file->f_pos;
-
                ret = -ENOENT;
                if (!IS_DEADDIR(host_inode)) {
-                       ret = host_file->f_op->readdir(host_file, buf, filldir);
+                       ret = host_file->f_op->iterate(host_file, ctx);
                        file_accessed(host_file);
                }
-
-               coda_file->f_pos = host_file->f_pos;
                mutex_unlock(&host_inode->i_mutex);
+               return ret;
        }
-       else /* Venus: we must read Venus dirents from a file */
-               ret = coda_venus_readdir(coda_file, buf, filldir);
-
-       return ret;
+       /* Venus: we must read Venus dirents from a file */
+       return coda_venus_readdir(coda_file, ctx);
 }
 
 static inline unsigned int CDT2DT(unsigned char cdt)
@@ -437,10 +425,8 @@ static inline unsigned int CDT2DT(unsigned char cdt)
 }
 
 /* support routines */
-static int coda_venus_readdir(struct file *coda_file, void *buf,
-                             filldir_t filldir)
+static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx)
 {
-       int result = 0; /* # of entries returned */
        struct coda_file_info *cfi;
        struct coda_inode_info *cii;
        struct file *host_file;
@@ -462,23 +448,12 @@ static int coda_venus_readdir(struct file *coda_file, void *buf,
        vdir = kmalloc(sizeof(*vdir), GFP_KERNEL);
        if (!vdir) return -ENOMEM;
 
-       if (coda_file->f_pos == 0) {
-               ret = filldir(buf, ".", 1, 0, de->d_inode->i_ino, DT_DIR);
-               if (ret < 0)
-                       goto out;
-               result++;
-               coda_file->f_pos++;
-       }
-       if (coda_file->f_pos == 1) {
-               ret = filldir(buf, "..", 2, 1, parent_ino(de), DT_DIR);
-               if (ret < 0)
-                       goto out;
-               result++;
-               coda_file->f_pos++;
-       }
+       if (!dir_emit_dots(coda_file, ctx))
+               goto out;
+
        while (1) {
                /* read entries from the directory file */
-               ret = kernel_read(host_file, coda_file->f_pos - 2, (char *)vdir,
+               ret = kernel_read(host_file, ctx->pos - 2, (char *)vdir,
                                  sizeof(*vdir));
                if (ret < 0) {
                        printk(KERN_ERR "coda readdir: read dir %s failed %d\n",
@@ -507,7 +482,7 @@ static int coda_venus_readdir(struct file *coda_file, void *buf,
 
                /* Make sure we skip '.' and '..', we already got those */
                if (name.name[0] == '.' && (name.len == 1 ||
-                   (vdir->d_name[1] == '.' && name.len == 2)))
+                   (name.name[1] == '.' && name.len == 2)))
                        vdir->d_fileno = name.len = 0;
 
                /* skip null entries */
@@ -520,19 +495,16 @@ static int coda_venus_readdir(struct file *coda_file, void *buf,
                        if (!ino) ino = vdir->d_fileno;
 
                        type = CDT2DT(vdir->d_type);
-                       ret = filldir(buf, name.name, name.len,
-                                     coda_file->f_pos, ino, type);
-                       /* failure means no space for filling in this round */
-                       if (ret < 0) break;
-                       result++;
+                       if (!dir_emit(ctx, name.name, name.len, ino, type))
+                               break;
                }
                /* we'll always have progress because d_reclen is unsigned and
                 * we've already established it is non-zero. */
-               coda_file->f_pos += vdir->d_reclen;
+               ctx->pos += vdir->d_reclen;
        }
 out:
        kfree(vdir);
-       return result ? result : ret;
+       return 0;
 }
 
 /* called when a cache lookup succeeds */
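Editor's note: the hunks above are part of this cycle's conversion from the old ->readdir()/filldir() interface to ->iterate() with a struct dir_context: the directory position now lives in ctx->pos and entries are emitted through dir_emit_dots()/dir_emit() instead of calling filldir directly. Below is a minimal sketch of a converted directory iterator, assuming only the helpers visible in these hunks; the demo_* names and the static entry table are invented for illustration.

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    struct demo_dirent {
            const char    *name;
            ino_t          ino;
            unsigned char  type;          /* DT_REG, DT_DIR, ... */
    };

    static const struct demo_dirent demo_entries[] = {
            { "hello.txt", 100, DT_REG },
            { "subdir",    101, DT_DIR },
    };

    static int demo_iterate(struct file *file, struct dir_context *ctx)
    {
            size_t i;

            /* Emits "." and "..", advancing ctx->pos past positions 0 and 1. */
            if (!dir_emit_dots(file, ctx))
                    return 0;

            /* Positions 2.. map onto the entry table. */
            for (i = ctx->pos - 2; i < ARRAY_SIZE(demo_entries); i++) {
                    const struct demo_dirent *de = &demo_entries[i];

                    /* dir_emit() returns false once the caller's buffer is full. */
                    if (!dir_emit(ctx, de->name, strlen(de->name), de->ino, de->type))
                            return 0;
                    ctx->pos++;
            }
            return 0;
    }

    static const struct file_operations demo_dir_fops = {
            .llseek  = generic_file_llseek,
            .read    = generic_read_dir,
            .iterate = demo_iterate,
    };

Compared with the filldir-era code being removed above, the return-value bookkeeping disappears: dir_emit() reports buffer exhaustion as false, and the final position is simply whatever was left in ctx->pos.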
index fc3b55dce184a2637fcbf14d1dc313e61714ad5d..6af20de2c1a3c29d5cc7c251d8fb3fa2182de445 100644 (file)
@@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
 };
 
 struct compat_readdir_callback {
+       struct dir_context ctx;
        struct compat_old_linux_dirent __user *dirent;
        int result;
 };
@@ -873,15 +874,15 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
 {
        int error;
        struct fd f = fdget(fd);
-       struct compat_readdir_callback buf;
+       struct compat_readdir_callback buf = {
+               .ctx.actor = compat_fillonedir,
+               .dirent = dirent
+       };
 
        if (!f.file)
                return -EBADF;
 
-       buf.result = 0;
-       buf.dirent = dirent;
-
-       error = vfs_readdir(f.file, compat_fillonedir, &buf);
+       error = iterate_dir(f.file, &buf.ctx);
        if (buf.result)
                error = buf.result;
 
@@ -897,6 +898,7 @@ struct compat_linux_dirent {
 };
 
 struct compat_getdents_callback {
+       struct dir_context ctx;
        struct compat_linux_dirent __user *current_dir;
        struct compat_linux_dirent __user *previous;
        int count;
@@ -951,7 +953,11 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
 {
        struct fd f;
        struct compat_linux_dirent __user * lastdirent;
-       struct compat_getdents_callback buf;
+       struct compat_getdents_callback buf = {
+               .ctx.actor = compat_filldir,
+               .current_dir = dirent,
+               .count = count
+       };
        int error;
 
        if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -961,17 +967,12 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
        if (!f.file)
                return -EBADF;
 
-       buf.current_dir = dirent;
-       buf.previous = NULL;
-       buf.count = count;
-       buf.error = 0;
-
-       error = vfs_readdir(f.file, compat_filldir, &buf);
+       error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               if (put_user(f.file->f_pos, &lastdirent->d_off))
+               if (put_user(buf.ctx.pos, &lastdirent->d_off))
                        error = -EFAULT;
                else
                        error = count - buf.count;
@@ -983,6 +984,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
 #ifndef __ARCH_OMIT_COMPAT_SYS_GETDENTS64
 
 struct compat_getdents_callback64 {
+       struct dir_context ctx;
        struct linux_dirent64 __user *current_dir;
        struct linux_dirent64 __user *previous;
        int count;
@@ -1036,7 +1038,11 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
 {
        struct fd f;
        struct linux_dirent64 __user * lastdirent;
-       struct compat_getdents_callback64 buf;
+       struct compat_getdents_callback64 buf = {
+               .ctx.actor = compat_filldir64,
+               .current_dir = dirent,
+               .count = count
+       };
        int error;
 
        if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -1046,17 +1052,12 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
        if (!f.file)
                return -EBADF;
 
-       buf.current_dir = dirent;
-       buf.previous = NULL;
-       buf.count = count;
-       buf.error = 0;
-
-       error = vfs_readdir(f.file, compat_filldir64, &buf);
+       error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               typeof(lastdirent->d_off) d_off = f.file->f_pos;
+               typeof(lastdirent->d_off) d_off = buf.ctx.pos;
                if (__put_user_unaligned(d_off, &lastdirent->d_off))
                        error = -EFAULT;
                else
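Editor's note: on the calling side, the fs/compat.c hunks embed a struct dir_context as the first member of each callback structure and pass &buf.ctx to iterate_dir(), reading the final position back from buf.ctx.pos instead of file->f_pos. A hedged sketch of that calling convention follows; demo_fill_one, demo_count_entries and their callback struct are invented, not kernel code.

    #include <linux/fs.h>

    struct demo_callback {
            struct dir_context ctx;  /* first member: the actor's void * is really &ctx */
            int count;
    };

    /* Actor with the 3.11-era filldir_t signature; a non-zero return stops iteration. */
    static int demo_fill_one(void *__buf, const char *name, int namlen,
                             loff_t offset, u64 ino, unsigned int d_type)
    {
            struct demo_callback *buf = __buf;

            buf->count++;
            return 0;
    }

    /* Count the entries of an already-open directory. */
    static int demo_count_entries(struct file *dir)
    {
            struct demo_callback buf = {
                    .ctx.actor = demo_fill_one,
            };
            int error = iterate_dir(dir, &buf.ctx);

            return error < 0 ? error : buf.count;
    }

Keeping the dir_context as the first member is what makes the void * cast in the actor safe, which is why every callback struct in these hunks gains the ctx field at the top.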
index 996cdc5abb85dae304b85cd479aaa20de5135a5e..5d19acfa7c6c652a6b8134ec79507457be463216 100644 (file)
@@ -66,7 +66,6 @@
 #include <linux/gigaset_dev.h>
 
 #ifdef CONFIG_BLOCK
-#include <linux/loop.h>
 #include <linux/cdrom.h>
 #include <linux/fd.h>
 #include <scsi/scsi.h>
@@ -954,8 +953,6 @@ COMPATIBLE_IOCTL(MTIOCTOP)
 /* Socket level stuff */
 COMPATIBLE_IOCTL(FIOQSIZE)
 #ifdef CONFIG_BLOCK
-/* loop */
-IGNORE_IOCTL(LOOP_CLR_FD)
 /* md calls this on random blockdevs */
 IGNORE_IOCTL(RAID_VERSION)
 /* qemu/qemu-img might call these two on plain files for probing */
index 7aabc6ad4e9bbda4cec09f30545f7d828b0c431a..64e5323cbbb014c4c2240c733d667b2e704bcdcc 100644 (file)
@@ -1532,84 +1532,66 @@ static inline unsigned char dt_type(struct configfs_dirent *sd)
        return (sd->s_mode >> 12) & 15;
 }
 
-static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+static int configfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry *dentry = filp->f_path.dentry;
+       struct dentry *dentry = file->f_path.dentry;
        struct super_block *sb = dentry->d_sb;
        struct configfs_dirent * parent_sd = dentry->d_fsdata;
-       struct configfs_dirent *cursor = filp->private_data;
+       struct configfs_dirent *cursor = file->private_data;
        struct list_head *p, *q = &cursor->s_sibling;
        ino_t ino = 0;
-       int i = filp->f_pos;
 
-       switch (i) {
-               case 0:
-                       ino = dentry->d_inode->i_ino;
-                       if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
-                               break;
-                       filp->f_pos++;
-                       i++;
-                       /* fallthrough */
-               case 1:
-                       ino = parent_ino(dentry);
-                       if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
-                               break;
-                       filp->f_pos++;
-                       i++;
-                       /* fallthrough */
-               default:
-                       if (filp->f_pos == 2) {
-                               spin_lock(&configfs_dirent_lock);
-                               list_move(q, &parent_sd->s_children);
-                               spin_unlock(&configfs_dirent_lock);
-                       }
-                       for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
-                               struct configfs_dirent *next;
-                               const char * name;
-                               int len;
-                               struct inode *inode = NULL;
+       if (!dir_emit_dots(file, ctx))
+               return 0;
+       if (ctx->pos == 2) {
+               spin_lock(&configfs_dirent_lock);
+               list_move(q, &parent_sd->s_children);
+               spin_unlock(&configfs_dirent_lock);
+       }
+       for (p = q->next; p != &parent_sd->s_children; p = p->next) {
+               struct configfs_dirent *next;
+               const char *name;
+               int len;
+               struct inode *inode = NULL;
+
+               next = list_entry(p, struct configfs_dirent, s_sibling);
+               if (!next->s_element)
+                       continue;
 
-                               next = list_entry(p, struct configfs_dirent,
-                                                  s_sibling);
-                               if (!next->s_element)
-                                       continue;
-
-                               name = configfs_get_name(next);
-                               len = strlen(name);
-
-                               /*
-                                * We'll have a dentry and an inode for
-                                * PINNED items and for open attribute
-                                * files.  We lock here to prevent a race
-                                * with configfs_d_iput() clearing
-                                * s_dentry before calling iput().
-                                *
-                                * Why do we go to the trouble?  If
-                                * someone has an attribute file open,
-                                * the inode number should match until
-                                * they close it.  Beyond that, we don't
-                                * care.
-                                */
-                               spin_lock(&configfs_dirent_lock);
-                               dentry = next->s_dentry;
-                               if (dentry)
-                                       inode = dentry->d_inode;
-                               if (inode)
-                                       ino = inode->i_ino;
-                               spin_unlock(&configfs_dirent_lock);
-                               if (!inode)
-                                       ino = iunique(sb, 2);
+               name = configfs_get_name(next);
+               len = strlen(name);
+
+               /*
+                * We'll have a dentry and an inode for
+                * PINNED items and for open attribute
+                * files.  We lock here to prevent a race
+                * with configfs_d_iput() clearing
+                * s_dentry before calling iput().
+                *
+                * Why do we go to the trouble?  If
+                * someone has an attribute file open,
+                * the inode number should match until
+                * they close it.  Beyond that, we don't
+                * care.
+                */
+               spin_lock(&configfs_dirent_lock);
+               dentry = next->s_dentry;
+               if (dentry)
+                       inode = dentry->d_inode;
+               if (inode)
+                       ino = inode->i_ino;
+               spin_unlock(&configfs_dirent_lock);
+               if (!inode)
+                       ino = iunique(sb, 2);
 
-                               if (filldir(dirent, name, len, filp->f_pos, ino,
-                                                dt_type(next)) < 0)
-                                       return 0;
+               if (!dir_emit(ctx, name, len, ino, dt_type(next)))
+                       return 0;
 
-                               spin_lock(&configfs_dirent_lock);
-                               list_move(q, p);
-                               spin_unlock(&configfs_dirent_lock);
-                               p = q;
-                               filp->f_pos++;
-                       }
+               spin_lock(&configfs_dirent_lock);
+               list_move(q, p);
+               spin_unlock(&configfs_dirent_lock);
+               p = q;
+               ctx->pos++;
        }
        return 0;
 }
@@ -1661,7 +1643,7 @@ const struct file_operations configfs_dir_operations = {
        .release        = configfs_dir_close,
        .llseek         = configfs_dir_lseek,
        .read           = generic_read_dir,
-       .readdir        = configfs_readdir,
+       .iterate        = configfs_readdir,
 };
 
 int configfs_register_subsystem(struct configfs_subsystem *subsys)
index 35b1c7bd18b758a30fc03fd4267151d18fcb2281..e501ac3a49ff7380a1bd2644ecc8abc3f648ce64 100644 (file)
@@ -349,18 +349,17 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 /*
  * Read a cramfs directory entry.
  */
-static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int cramfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        char *buf;
        unsigned int offset;
-       int copied;
 
        /* Offset within the thing. */
-       offset = filp->f_pos;
-       if (offset >= inode->i_size)
+       if (ctx->pos >= inode->i_size)
                return 0;
+       offset = ctx->pos;
        /* Directory entries are always 4-byte aligned */
        if (offset & 3)
                return -EINVAL;
@@ -369,14 +368,13 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        if (!buf)
                return -ENOMEM;
 
-       copied = 0;
        while (offset < inode->i_size) {
                struct cramfs_inode *de;
                unsigned long nextoffset;
                char *name;
                ino_t ino;
                umode_t mode;
-               int namelen, error;
+               int namelen;
 
                mutex_lock(&read_mutex);
                de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
@@ -402,13 +400,10 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                break;
                        namelen--;
                }
-               error = filldir(dirent, buf, namelen, offset, ino, mode >> 12);
-               if (error)
+               if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
                        break;
 
-               offset = nextoffset;
-               filp->f_pos = offset;
-               copied++;
+               ctx->pos = offset = nextoffset;
        }
        kfree(buf);
        return 0;
@@ -547,7 +542,7 @@ static const struct address_space_operations cramfs_aops = {
 static const struct file_operations cramfs_directory_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = cramfs_readdir,
+       .iterate        = cramfs_readdir,
 };
 
 static const struct inode_operations cramfs_dir_inode_operations = {
index f09b9085f7d849e235a2b8d77c079de71c96f5d2..5a23073138dfe310fb076e81567bac97cd0be9b6 100644 (file)
@@ -1612,6 +1612,10 @@ EXPORT_SYMBOL(d_obtain_alias);
  * If a dentry was found and moved, then it is returned.  Otherwise NULL
  * is returned.  This matches the expected return value of ->lookup.
  *
+ * Cluster filesystems may call this function with a negative, hashed dentry.
+ * In that case, we know that the inode will be a regular file, and also this
+ * will only occur during atomic_open. So we need to check for the dentry
+ * being already hashed only in the final case.
  */
 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
 {
@@ -1636,8 +1640,11 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
                        security_d_instantiate(dentry, inode);
                        d_rehash(dentry);
                }
-       } else
-               d_add(dentry, inode);
+       } else {
+               d_instantiate(dentry, inode);
+               if (d_unhashed(dentry))
+                       d_rehash(dentry);
+       }
        return new;
 }
 EXPORT_SYMBOL(d_splice_alias);
index 7d58d5b112b559ccc9a9145a003f15e076d8a27b..76feb4b60fa6d0a307d70b216a7d4a91cdf389ba 100644 (file)
@@ -138,8 +138,9 @@ static ssize_t cluster_cluster_name_read(struct dlm_cluster *cl, char *buf)
 static ssize_t cluster_cluster_name_write(struct dlm_cluster *cl,
                                          const char *buf, size_t len)
 {
-       strncpy(dlm_config.ci_cluster_name, buf, DLM_LOCKSPACE_LEN);
-       strncpy(cl->cl_cluster_name, buf, DLM_LOCKSPACE_LEN);
+       strlcpy(dlm_config.ci_cluster_name, buf,
+                               sizeof(dlm_config.ci_cluster_name));
+       strlcpy(cl->cl_cluster_name, buf, sizeof(cl->cl_cluster_name));
        return len;
 }
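Editor's note: the strncpy() to strlcpy() change above matters because strncpy() leaves the destination without a terminating NUL whenever the source fills the whole limit, while strlcpy() always terminates and copies at most size - 1 characters. A tiny illustration under that assumption; the buffer size and demo_copy() are made up.

    #include <linux/string.h>

    #define DEMO_NAME_LEN 8

    static void demo_copy(const char *src)
    {
            char a[DEMO_NAME_LEN], b[DEMO_NAME_LEN];

            /* May leave 'a' unterminated if strlen(src) >= DEMO_NAME_LEN. */
            strncpy(a, src, DEMO_NAME_LEN);

            /* Always NUL-terminates; copies at most DEMO_NAME_LEN - 1 characters. */
            strlcpy(b, src, sizeof(b));
    }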
 
index 1b1146670c4b9c881b1b3a40d62a01b150cecd9d..e223a911a8346691d065e573f0ce0ab0ea9465fc 100644 (file)
@@ -2038,8 +2038,8 @@ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
        b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
        if (b == 1) {
                int len = receive_extralen(ms);
-               if (len > DLM_RESNAME_MAXLEN)
-                       len = DLM_RESNAME_MAXLEN;
+               if (len > r->res_ls->ls_lvblen)
+                       len = r->res_ls->ls_lvblen;
                memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
                lkb->lkb_lvbseq = ms->m_lvbseq;
        }
@@ -3893,8 +3893,8 @@ static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
                if (!lkb->lkb_lvbptr)
                        return -ENOMEM;
                len = receive_extralen(ms);
-               if (len > DLM_RESNAME_MAXLEN)
-                       len = DLM_RESNAME_MAXLEN;
+               if (len > ls->ls_lvblen)
+                       len = ls->ls_lvblen;
                memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
        }
        return 0;
index 3ca79d3253b9b195e698084f76cc23e686b5a0a7..88556dc0458ee045659ed5a91875fb51d7ac2264 100644 (file)
@@ -883,17 +883,24 @@ int dlm_release_lockspace(void *lockspace, int force)
 void dlm_stop_lockspaces(void)
 {
        struct dlm_ls *ls;
+       int count;
 
  restart:
+       count = 0;
        spin_lock(&lslist_lock);
        list_for_each_entry(ls, &lslist, ls_list) {
-               if (!test_bit(LSFL_RUNNING, &ls->ls_flags))
+               if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
+                       count++;
                        continue;
+               }
                spin_unlock(&lslist_lock);
                log_error(ls, "no userland control daemon, stopping lockspace");
                dlm_ls_stop(ls);
                goto restart;
        }
        spin_unlock(&lslist_lock);
+
+       if (count)
+               log_print("dlm user daemon left %d lockspaces", count);
 }
 
index d0ccd2fd79eb0430ac08db074edfb811859e4b90..d90909ec6aa6bc5dd846b0545773b8cee0bee7d8 100644 (file)
@@ -52,7 +52,6 @@
 #include <linux/mutex.h>
 #include <linux/sctp.h>
 #include <linux/slab.h>
-#include <linux/sctp.h>
 #include <net/sctp/sctp.h>
 #include <net/ipv6.h>
 
@@ -126,6 +125,7 @@ struct connection {
        struct connection *othercon;
        struct work_struct rwork; /* Receive workqueue */
        struct work_struct swork; /* Send workqueue */
+       bool try_new_addr;
 };
 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
 
@@ -144,6 +144,7 @@ struct dlm_node_addr {
        struct list_head list;
        int nodeid;
        int addr_count;
+       int curr_addr_index;
        struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
 };
 
@@ -310,7 +311,7 @@ static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
 }
 
 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
-                         struct sockaddr *sa_out)
+                         struct sockaddr *sa_out, bool try_new_addr)
 {
        struct sockaddr_storage sas;
        struct dlm_node_addr *na;
@@ -320,8 +321,16 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
 
        spin_lock(&dlm_node_addrs_spin);
        na = find_node_addr(nodeid);
-       if (na && na->addr_count)
-               memcpy(&sas, na->addr[0], sizeof(struct sockaddr_storage));
+       if (na && na->addr_count) {
+               if (try_new_addr) {
+                       na->curr_addr_index++;
+                       if (na->curr_addr_index == na->addr_count)
+                               na->curr_addr_index = 0;
+               }
+
+               memcpy(&sas, na->addr[na->curr_addr_index],
+                       sizeof(struct sockaddr_storage));
+       }
        spin_unlock(&dlm_node_addrs_spin);
 
        if (!na)
@@ -353,19 +362,22 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
 {
        struct dlm_node_addr *na;
        int rv = -EEXIST;
+       int addr_i;
 
        spin_lock(&dlm_node_addrs_spin);
        list_for_each_entry(na, &dlm_node_addrs, list) {
                if (!na->addr_count)
                        continue;
 
-               if (!addr_compare(na->addr[0], addr))
-                       continue;
-
-               *nodeid = na->nodeid;
-               rv = 0;
-               break;
+               for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
+                       if (addr_compare(na->addr[addr_i], addr)) {
+                               *nodeid = na->nodeid;
+                               rv = 0;
+                               goto unlock;
+                       }
+               }
        }
+unlock:
        spin_unlock(&dlm_node_addrs_spin);
        return rv;
 }
@@ -561,8 +573,23 @@ static void sctp_send_shutdown(sctp_assoc_t associd)
 
 static void sctp_init_failed_foreach(struct connection *con)
 {
+
+       /*
+        * Don't try to recover base con and handle race where the
+        * other node's assoc init creates an assoc and we get that
+        * notification, then we get a notification that our attempt
+        * failed. This happens when we are still trying the primary
+        * address, but the other node has already tried secondary addrs
+        * and found one that worked.
+        */
+       if (!con->nodeid || con->sctp_assoc)
+               return;
+
+       log_print("Retrying SCTP association init for node %d\n", con->nodeid);
+
+       con->try_new_addr = true;
        con->sctp_assoc = 0;
-       if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
+       if (test_and_clear_bit(CF_INIT_PENDING, &con->flags)) {
                if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
                        queue_work(send_workqueue, &con->swork);
        }
@@ -579,15 +606,56 @@ static void sctp_init_failed(void)
        mutex_unlock(&connections_lock);
 }
 
+static void retry_failed_sctp_send(struct connection *recv_con,
+                                  struct sctp_send_failed *sn_send_failed,
+                                  char *buf)
+{
+       int len = sn_send_failed->ssf_length - sizeof(struct sctp_send_failed);
+       struct dlm_mhandle *mh;
+       struct connection *con;
+       char *retry_buf;
+       int nodeid = sn_send_failed->ssf_info.sinfo_ppid;
+
+       log_print("Retry sending %d bytes to node id %d", len, nodeid);
+
+       con = nodeid2con(nodeid, 0);
+       if (!con) {
+               log_print("Could not look up con for nodeid %d\n",
+                         nodeid);
+               return;
+       }
+
+       mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &retry_buf);
+       if (!mh) {
+               log_print("Could not allocate buf for retry.");
+               return;
+       }
+       memcpy(retry_buf, buf + sizeof(struct sctp_send_failed), len);
+       dlm_lowcomms_commit_buffer(mh);
+
+       /*
+        * If we got a assoc changed event before the send failed event then
+        * we only need to retry the send.
+        */
+       if (con->sctp_assoc) {
+               if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
+                       queue_work(send_workqueue, &con->swork);
+       } else
+               sctp_init_failed_foreach(con);
+}
+
 /* Something happened to an association */
 static void process_sctp_notification(struct connection *con,
                                      struct msghdr *msg, char *buf)
 {
        union sctp_notification *sn = (union sctp_notification *)buf;
 
-       if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) {
+       switch (sn->sn_header.sn_type) {
+       case SCTP_SEND_FAILED:
+               retry_failed_sctp_send(con, &sn->sn_send_failed, buf);
+               break;
+       case SCTP_ASSOC_CHANGE:
                switch (sn->sn_assoc_change.sac_state) {
-
                case SCTP_COMM_UP:
                case SCTP_RESTART:
                {
@@ -662,9 +730,11 @@ static void process_sctp_notification(struct connection *con,
                        log_print("connecting to %d sctp association %d",
                                 nodeid, (int)sn->sn_assoc_change.sac_assoc_id);
 
+                       new_con->sctp_assoc = sn->sn_assoc_change.sac_assoc_id;
+                       new_con->try_new_addr = false;
                        /* Send any pending writes */
                        clear_bit(CF_CONNECT_PENDING, &new_con->flags);
-                       clear_bit(CF_INIT_PENDING, &con->flags);
+                       clear_bit(CF_INIT_PENDING, &new_con->flags);
                        if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
                                queue_work(send_workqueue, &new_con->swork);
                        }
@@ -683,14 +753,10 @@ static void process_sctp_notification(struct connection *con,
                }
                break;
 
-               /* We don't know which INIT failed, so clear the PENDING flags
-                * on them all.  if assoc_id is zero then it will then try
-                * again */
-
                case SCTP_CANT_STR_ASSOC:
                {
+                       /* Will retry init when we get the send failed notification */
                        log_print("Can't start SCTP association - retrying");
-                       sctp_init_failed();
                }
                break;
 
@@ -699,6 +765,8 @@ static void process_sctp_notification(struct connection *con,
                                  (int)sn->sn_assoc_change.sac_assoc_id,
                                  sn->sn_assoc_change.sac_state);
                }
+       default:
+               ; /* fall through */
        }
 }
 
@@ -958,6 +1026,24 @@ static void free_entry(struct writequeue_entry *e)
        kfree(e);
 }
 
+/*
+ * writequeue_entry_complete - try to delete and free write queue entry
+ * @e: write queue entry to try to delete
+ * @completed: bytes completed
+ *
+ * writequeue_lock must be held.
+ */
+static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
+{
+       e->offset += completed;
+       e->len -= completed;
+
+       if (e->len == 0 && e->users == 0) {
+               list_del(&e->list);
+               free_entry(e);
+       }
+}
+
 /* Initiate an SCTP association.
    This is a special case of send_to_sock() in that we don't yet have a
    peeled-off socket for this association, so we use the listening socket
@@ -977,15 +1063,14 @@ static void sctp_init_assoc(struct connection *con)
        int addrlen;
        struct kvec iov[1];
 
+       mutex_lock(&con->sock_mutex);
        if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
-               return;
-
-       if (con->retries++ > MAX_CONNECT_RETRIES)
-               return;
+               goto unlock;
 
-       if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr)) {
+       if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr,
+                          con->try_new_addr)) {
                log_print("no address for nodeid %d", con->nodeid);
-               return;
+               goto unlock;
        }
        base_con = nodeid2con(0, 0);
        BUG_ON(base_con == NULL);
@@ -1003,17 +1088,25 @@ static void sctp_init_assoc(struct connection *con)
        if (list_empty(&con->writequeue)) {
                spin_unlock(&con->writequeue_lock);
                log_print("writequeue empty for nodeid %d", con->nodeid);
-               return;
+               goto unlock;
        }
 
        e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
        len = e->len;
        offset = e->offset;
-       spin_unlock(&con->writequeue_lock);
 
        /* Send the first block off the write queue */
        iov[0].iov_base = page_address(e->page)+offset;
        iov[0].iov_len = len;
+       spin_unlock(&con->writequeue_lock);
+
+       if (rem_addr.ss_family == AF_INET) {
+               struct sockaddr_in *sin = (struct sockaddr_in *)&rem_addr;
+               log_print("Trying to connect to %pI4", &sin->sin_addr.s_addr);
+       } else {
+               struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&rem_addr;
+               log_print("Trying to connect to %pI6", &sin6->sin6_addr);
+       }
 
        cmsg = CMSG_FIRSTHDR(&outmessage);
        cmsg->cmsg_level = IPPROTO_SCTP;
@@ -1021,8 +1114,9 @@ static void sctp_init_assoc(struct connection *con)
        cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
        sinfo = CMSG_DATA(cmsg);
        memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
-       sinfo->sinfo_ppid = cpu_to_le32(dlm_our_nodeid());
+       sinfo->sinfo_ppid = cpu_to_le32(con->nodeid);
        outmessage.msg_controllen = cmsg->cmsg_len;
+       sinfo->sinfo_flags |= SCTP_ADDR_OVER;
 
        ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len);
        if (ret < 0) {
@@ -1035,15 +1129,12 @@ static void sctp_init_assoc(struct connection *con)
        }
        else {
                spin_lock(&con->writequeue_lock);
-               e->offset += ret;
-               e->len -= ret;
-
-               if (e->len == 0 && e->users == 0) {
-                       list_del(&e->list);
-                       free_entry(e);
-               }
+               writequeue_entry_complete(e, ret);
                spin_unlock(&con->writequeue_lock);
        }
+
+unlock:
+       mutex_unlock(&con->sock_mutex);
 }
 
 /* Connect a new socket to its peer */
@@ -1075,7 +1166,7 @@ static void tcp_connect_to_sock(struct connection *con)
                goto out_err;
 
        memset(&saddr, 0, sizeof(saddr));
-       result = nodeid_to_addr(con->nodeid, &saddr, NULL);
+       result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
        if (result < 0) {
                log_print("no address for nodeid %d", con->nodeid);
                goto out_err;
@@ -1254,6 +1345,7 @@ static int sctp_listen_for_all(void)
        int result = -EINVAL, num = 1, i, addr_len;
        struct connection *con = nodeid2con(0, GFP_NOFS);
        int bufsize = NEEDED_RMEM;
+       int one = 1;
 
        if (!con)
                return -ENOMEM;
@@ -1288,6 +1380,11 @@ static int sctp_listen_for_all(void)
                goto create_delsock;
        }
 
+       result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
+                                  sizeof(one));
+       if (result < 0)
+               log_print("Could not set SCTP NODELAY error %d\n", result);
+
        /* Init con struct */
        sock->sk->sk_user_data = con;
        con->sock = sock;
@@ -1493,13 +1590,7 @@ static void send_to_sock(struct connection *con)
                }
 
                spin_lock(&con->writequeue_lock);
-               e->offset += ret;
-               e->len -= ret;
-
-               if (e->len == 0 && e->users == 0) {
-                       list_del(&e->list);
-                       free_entry(e);
-               }
+               writequeue_entry_complete(e, ret);
        }
        spin_unlock(&con->writequeue_lock);
 out:
index a7abbea2c09638ef8c190555ec466834c0c06edf..9aa05e08060b507f05f17a5b464fb1f695c7e3e4 100644 (file)
@@ -68,9 +68,9 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
 }
 
 struct ecryptfs_getdents_callback {
-       void *dirent;
+       struct dir_context ctx;
+       struct dir_context *caller;
        struct dentry *dentry;
-       filldir_t filldir;
        int filldir_called;
        int entries_written;
 };
@@ -96,9 +96,10 @@ ecryptfs_filldir(void *dirent, const char *lower_name, int lower_namelen,
                       rc);
                goto out;
        }
-       rc = buf->filldir(buf->dirent, name, name_size, offset, ino, d_type);
+       buf->caller->pos = buf->ctx.pos;
+       rc = !dir_emit(buf->caller, name, name_size, ino, d_type);
        kfree(name);
-       if (rc >= 0)
+       if (!rc)
                buf->entries_written++;
 out:
        return rc;
@@ -107,27 +108,23 @@ out:
 /**
  * ecryptfs_readdir
  * @file: The eCryptfs directory file
- * @dirent: Directory entry handle
- * @filldir: The filldir callback function
+ * @ctx: The actor to feed the entries to
  */
-static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir)
+static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
 {
        int rc;
        struct file *lower_file;
        struct inode *inode;
-       struct ecryptfs_getdents_callback buf;
-
+       struct ecryptfs_getdents_callback buf = {
+               .ctx.actor = ecryptfs_filldir,
+               .caller = ctx,
+               .dentry = file->f_path.dentry
+       };
        lower_file = ecryptfs_file_to_lower(file);
-       lower_file->f_pos = file->f_pos;
+       lower_file->f_pos = ctx->pos;
        inode = file_inode(file);
-       memset(&buf, 0, sizeof(buf));
-       buf.dirent = dirent;
-       buf.dentry = file->f_path.dentry;
-       buf.filldir = filldir;
-       buf.filldir_called = 0;
-       buf.entries_written = 0;
-       rc = vfs_readdir(lower_file, ecryptfs_filldir, (void *)&buf);
-       file->f_pos = lower_file->f_pos;
+       rc = iterate_dir(lower_file, &buf.ctx);
+       ctx->pos = buf.ctx.pos;
        if (rc < 0)
                goto out;
        if (buf.filldir_called && !buf.entries_written)
@@ -344,7 +341,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 #endif
 
 const struct file_operations ecryptfs_dir_fops = {
-       .readdir = ecryptfs_readdir,
+       .iterate = ecryptfs_readdir,
        .read = generic_read_dir,
        .unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
@@ -365,7 +362,7 @@ const struct file_operations ecryptfs_main_fops = {
        .aio_read = ecryptfs_read_update_atime,
        .write = do_sync_write,
        .aio_write = generic_file_aio_write,
-       .readdir = ecryptfs_readdir,
+       .iterate = ecryptfs_readdir,
        .unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
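Editor's note: eCryptfs stacks on top of another filesystem, so its readdir is now one dir_context driving another: a private context with its own actor iterates the lower directory, and each (decrypted) name is re-emitted into the caller's context while the two positions are kept in step. A sketch of that forwarding pattern with the name translation omitted; the demo_* names are illustrative, and the lower file is assumed to hang off file->private_data, which is not how eCryptfs itself stores it.

    #include <linux/fs.h>

    struct demo_getdents_callback {
            struct dir_context ctx;       /* context used against the lower directory */
            struct dir_context *caller;   /* the context we ultimately fill */
    };

    static int demo_forward_entry(void *__buf, const char *lower_name, int len,
                                  loff_t offset, u64 ino, unsigned int d_type)
    {
            struct demo_getdents_callback *buf = __buf;

            /* A real stacking filesystem would translate lower_name here. */
            buf->caller->pos = buf->ctx.pos;
            return !dir_emit(buf->caller, lower_name, len, ino, d_type);
    }

    static int demo_readdir(struct file *file, struct dir_context *ctx)
    {
            struct file *lower_file = file->private_data;  /* assumed for illustration */
            struct demo_getdents_callback buf = {
                    .ctx.actor = demo_forward_entry,
                    .caller = ctx,
            };
            int rc;

            lower_file->f_pos = ctx->pos;
            rc = iterate_dir(lower_file, &buf.ctx);
            ctx->pos = buf.ctx.pos;
            return rc;
    }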
index 055a9e9ca747fb4d0cbae33c14930cdd8cbe4cb7..b72307ccdf7afc08c2062741a585b971d0e99a15 100644 (file)
@@ -7,40 +7,38 @@
 #include <linux/buffer_head.h>
 #include "efs.h"
 
-static int efs_readdir(struct file *, void *, filldir_t);
+static int efs_readdir(struct file *, struct dir_context *);
 
 const struct file_operations efs_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = efs_readdir,
+       .iterate        = efs_readdir,
 };
 
 const struct inode_operations efs_dir_inode_operations = {
        .lookup         = efs_lookup,
 };
 
-static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
-       struct inode *inode = file_inode(filp);
-       struct buffer_head *bh;
-
-       struct efs_dir          *dirblock;
-       struct efs_dentry       *dirslot;
-       efs_ino_t               inodenum;
+static int efs_readdir(struct file *file, struct dir_context *ctx)
+{
+       struct inode *inode = file_inode(file);
        efs_block_t             block;
-       int                     slot, namelen;
-       char                    *nameptr;
+       int                     slot;
 
        if (inode->i_size & (EFS_DIRBSIZE-1))
                printk(KERN_WARNING "EFS: WARNING: readdir(): directory size not a multiple of EFS_DIRBSIZE\n");
 
        /* work out where this entry can be found */
-       block = filp->f_pos >> EFS_DIRBSIZE_BITS;
+       block = ctx->pos >> EFS_DIRBSIZE_BITS;
 
        /* each block contains at most 256 slots */
-       slot  = filp->f_pos & 0xff;
+       slot  = ctx->pos & 0xff;
 
        /* look at all blocks */
        while (block < inode->i_blocks) {
+               struct efs_dir          *dirblock;
+               struct buffer_head *bh;
+
                /* read the dir block */
                bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
 
@@ -57,11 +55,14 @@ static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
                        break;
                }
 
-               while (slot < dirblock->slots) {
-                       if (dirblock->space[slot] == 0) {
-                               slot++;
+               for (; slot < dirblock->slots; slot++) {
+                       struct efs_dentry *dirslot;
+                       efs_ino_t inodenum;
+                       const char *nameptr;
+                       int namelen;
+
+                       if (dirblock->space[slot] == 0)
                                continue;
-                       }
 
                        dirslot  = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot));
 
@@ -72,39 +73,29 @@ static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
 #ifdef DEBUG
                        printk(KERN_DEBUG "EFS: readdir(): block %d slot %d/%d: inode %u, name \"%s\", namelen %u\n", block, slot, dirblock->slots-1, inodenum, nameptr, namelen);
 #endif
-                       if (namelen > 0) {
-                               /* found the next entry */
-                               filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
-
-                               /* copy filename and data in dirslot */
-                               filldir(dirent, nameptr, namelen, filp->f_pos, inodenum, DT_UNKNOWN);
-
-                               /* sanity check */
-                               if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) {
-                                       printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot);
-                                       slot++;
-                                       continue;
-                               }
-
-                               /* store position of next slot */
-                               if (++slot == dirblock->slots) {
-                                       slot = 0;
-                                       block++;
-                               }
+                       if (!namelen)
+                               continue;
+                       /* found the next entry */
+                       ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot;
+
+                       /* sanity check */
+                       if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) {
+                               printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot);
+                               continue;
+                       }
+
+                       /* copy filename and data in dirslot */
+                       if (!dir_emit(ctx, nameptr, namelen, inodenum, DT_UNKNOWN)) {
                                brelse(bh);
-                               filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
-                               goto out;
+                               return 0;
                        }
-                       slot++;
                }
                brelse(bh);
 
                slot = 0;
                block++;
        }
-
-       filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
-out:
+       ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot;
        return 0;
 }
 
index 643019585574956f44bdca47c52c3993046db069..ffd7a813ad3d06ee1e1de4e996c72282c24e7039 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1135,13 +1135,6 @@ void setup_new_exec(struct linux_binprm * bprm)
                        set_dumpable(current->mm, suid_dumpable);
        }
 
-       /*
-        * Flush performance counters when crossing a
-        * security domain:
-        */
-       if (!get_dumpable(current->mm))
-               perf_event_exit_task(current);
-
        /* An exec changes our domain. We are no longer part of the thread
           group */
 
@@ -1205,6 +1198,15 @@ void install_exec_creds(struct linux_binprm *bprm)
 
        commit_creds(bprm->cred);
        bprm->cred = NULL;
+
+       /*
+        * Disable monitoring for regular users
+        * when executing setuid binaries. Must
+        * wait until new credentials are committed
+        * by commit_creds() above
+        */
+       if (get_dumpable(current->mm) != SUID_DUMP_USER)
+               perf_event_exit_task(current);
        /*
         * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
index 46375896cfc0a8572140e9fa69d2629cd044755a..49f51ab4caac7689d48049790eae2e0b22d4c082 100644 (file)
@@ -239,22 +239,19 @@ void exofs_set_de_type(struct exofs_dir_entry *de, struct inode *inode)
 }
 
 static int
-exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+exofs_readdir(struct file *file, struct dir_context *ctx)
 {
-       loff_t pos = filp->f_pos;
-       struct inode *inode = file_inode(filp);
+       loff_t pos = ctx->pos;
+       struct inode *inode = file_inode(file);
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = dir_pages(inode);
        unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
-       unsigned char *types = NULL;
-       int need_revalidate = (filp->f_version != inode->i_version);
+       int need_revalidate = (file->f_version != inode->i_version);
 
        if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1))
                return 0;
 
-       types = exofs_filetype_table;
-
        for ( ; n < npages; n++, offset = 0) {
                char *kaddr, *limit;
                struct exofs_dir_entry *de;
@@ -263,7 +260,7 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                if (IS_ERR(page)) {
                        EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
                                  inode->i_ino);
-                       filp->f_pos += PAGE_CACHE_SIZE - offset;
+                       ctx->pos += PAGE_CACHE_SIZE - offset;
                        return PTR_ERR(page);
                }
                kaddr = page_address(page);
@@ -271,9 +268,9 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        if (offset) {
                                offset = exofs_validate_entry(kaddr, offset,
                                                                chunk_mask);
-                               filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
+                               ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
                        }
-                       filp->f_version = inode->i_version;
+                       file->f_version = inode->i_version;
                        need_revalidate = 0;
                }
                de = (struct exofs_dir_entry *)(kaddr + offset);
@@ -288,27 +285,24 @@ exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                return -EIO;
                        }
                        if (de->inode_no) {
-                               int over;
-                               unsigned char d_type = DT_UNKNOWN;
+                               unsigned char t;
 
-                               if (types && de->file_type < EXOFS_FT_MAX)
-                                       d_type = types[de->file_type];
+                               if (de->file_type < EXOFS_FT_MAX)
+                                       t = exofs_filetype_table[de->file_type];
+                               else
+                                       t = DT_UNKNOWN;
 
-                               offset = (char *)de - kaddr;
-                               over = filldir(dirent, de->name, de->name_len,
-                                               (n<<PAGE_CACHE_SHIFT) | offset,
+                               if (!dir_emit(ctx, de->name, de->name_len,
                                                le64_to_cpu(de->inode_no),
-                                               d_type);
-                               if (over) {
+                                               t)) {
                                        exofs_put_page(page);
                                        return 0;
                                }
                        }
-                       filp->f_pos += le16_to_cpu(de->rec_len);
+                       ctx->pos += le16_to_cpu(de->rec_len);
                }
                exofs_put_page(page);
        }
-
        return 0;
 }
 
@@ -669,5 +663,5 @@ not_empty:
 const struct file_operations exofs_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = exofs_readdir,
+       .iterate        = exofs_readdir,
 };
index d1f80abd8828fcf3d8adadea9246effef03b7e29..2ec8eb1ab269ae292d338d5d97ee48990af5b2c7 100644 (file)
@@ -953,9 +953,11 @@ static int exofs_releasepage(struct page *page, gfp_t gfp)
        return 0;
 }
 
-static void exofs_invalidatepage(struct page *page, unsigned long offset)
+static void exofs_invalidatepage(struct page *page, unsigned int offset,
+                                unsigned int length)
 {
-       EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
+       EXOFS_DBGMSG("page 0x%lx offset 0x%x length 0x%x\n",
+                    page->index, offset, length);
        WARN_ON(1);
 }
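Editor's note: the exofs hunk above picks up another interface change from this window: ->invalidatepage() in struct address_space_operations now takes an offset and a length, so a filesystem is told exactly which byte range of the page is being invalidated rather than assuming "from offset to the end of the page". A minimal sketch of the updated hook; demo_invalidatepage and demo_aops are illustrative.

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    static void demo_invalidatepage(struct page *page, unsigned int offset,
                                    unsigned int length)
    {
            /* Only bytes [offset, offset + length) of this page are going away. */
            pr_debug("invalidate page %lu: offset %u length %u\n",
                     page->index, offset, length);
    }

    static const struct address_space_operations demo_aops = {
            .invalidatepage = demo_invalidatepage,
    };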
 
index 262fc99409824327b316af89a945c65cae8c9888..293bc2e47a735807a75eaad424764315172367b6 100644 (file)
@@ -212,6 +212,7 @@ reconnect_path(struct vfsmount *mnt, struct dentry *target_dir, char *nbuf)
 }
 
 struct getdents_callback {
+       struct dir_context ctx;
        char *name;             /* name that was found. It already points to a
                                   buffer NAME_MAX+1 is size */
        unsigned long ino;      /* the inum we are looking for */
@@ -254,7 +255,11 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
        struct inode *dir = path->dentry->d_inode;
        int error;
        struct file *file;
-       struct getdents_callback buffer;
+       struct getdents_callback buffer = {
+               .ctx.actor = filldir_one,
+               .name = name,
+               .ino = child->d_inode->i_ino
+       };
 
        error = -ENOTDIR;
        if (!dir || !S_ISDIR(dir->i_mode))
@@ -271,17 +276,14 @@ static int get_name(const struct path *path, char *name, struct dentry *child)
                goto out;
 
        error = -EINVAL;
-       if (!file->f_op->readdir)
+       if (!file->f_op->iterate)
                goto out_close;
 
-       buffer.name = name;
-       buffer.ino = child->d_inode->i_ino;
-       buffer.found = 0;
        buffer.sequence = 0;
        while (1) {
                int old_seq = buffer.sequence;
 
-               error = vfs_readdir(file, filldir_one, &buffer);
+               error = iterate_dir(file, &buffer.ctx);
                if (buffer.found) {
                        error = 0;
                        break;
index 4237722bfd27173c34c9835cb1e28deeaa2e72f6..6e1d4ab09d7226e4e519ea00a2e1941dd793ca39 100644 (file)
@@ -287,17 +287,17 @@ static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
 }
 
 static int
-ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
+ext2_readdir(struct file *file, struct dir_context *ctx)
 {
-       loff_t pos = filp->f_pos;
-       struct inode *inode = file_inode(filp);
+       loff_t pos = ctx->pos;
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = dir_pages(inode);
        unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
        unsigned char *types = NULL;
-       int need_revalidate = filp->f_version != inode->i_version;
+       int need_revalidate = file->f_version != inode->i_version;
 
        if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
                return 0;
@@ -314,16 +314,16 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
                        ext2_error(sb, __func__,
                                   "bad page in #%lu",
                                   inode->i_ino);
-                       filp->f_pos += PAGE_CACHE_SIZE - offset;
+                       ctx->pos += PAGE_CACHE_SIZE - offset;
                        return PTR_ERR(page);
                }
                kaddr = page_address(page);
                if (unlikely(need_revalidate)) {
                        if (offset) {
                                offset = ext2_validate_entry(kaddr, offset, chunk_mask);
-                               filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
+                               ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
                        }
-                       filp->f_version = inode->i_version;
+                       file->f_version = inode->i_version;
                        need_revalidate = 0;
                }
                de = (ext2_dirent *)(kaddr+offset);
@@ -336,22 +336,19 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
                                return -EIO;
                        }
                        if (de->inode) {
-                               int over;
                                unsigned char d_type = DT_UNKNOWN;
 
                                if (types && de->file_type < EXT2_FT_MAX)
                                        d_type = types[de->file_type];
 
-                               offset = (char *)de - kaddr;
-                               over = filldir(dirent, de->name, de->name_len,
-                                               (n<<PAGE_CACHE_SHIFT) | offset,
-                                               le32_to_cpu(de->inode), d_type);
-                               if (over) {
+                               if (!dir_emit(ctx, de->name, de->name_len,
+                                               le32_to_cpu(de->inode),
+                                               d_type)) {
                                        ext2_put_page(page);
                                        return 0;
                                }
                        }
-                       filp->f_pos += ext2_rec_len_from_disk(de->rec_len);
+                       ctx->pos += ext2_rec_len_from_disk(de->rec_len);
                }
                ext2_put_page(page);
        }
@@ -724,7 +721,7 @@ not_empty:
 const struct file_operations ext2_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = ext2_readdir,
+       .iterate        = ext2_readdir,
        .unlocked_ioctl = ext2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext2_compat_ioctl,
index 87eccbbca25541a45f8e4b008728376cf7b59f5b..f522425aaa24ff56f6e01f02c2f3313819f62ca1 100644 (file)
@@ -28,8 +28,7 @@ static unsigned char ext3_filetype_table[] = {
        DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
 };
 
-static int ext3_dx_readdir(struct file * filp,
-                          void * dirent, filldir_t filldir);
+static int ext3_dx_readdir(struct file *, struct dir_context *);
 
 static unsigned char get_dtype(struct super_block *sb, int filetype)
 {
@@ -91,36 +90,30 @@ int ext3_check_dir_entry (const char * function, struct inode * dir,
        return error_msg == NULL ? 1 : 0;
 }
 
-static int ext3_readdir(struct file * filp,
-                        void * dirent, filldir_t filldir)
+static int ext3_readdir(struct file *file, struct dir_context *ctx)
 {
-       int error = 0;
        unsigned long offset;
-       int i, stored;
+       int i;
        struct ext3_dir_entry_2 *de;
        int err;
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
-       int ret = 0;
        int dir_has_error = 0;
 
        if (is_dx_dir(inode)) {
-               err = ext3_dx_readdir(filp, dirent, filldir);
-               if (err != ERR_BAD_DX_DIR) {
-                       ret = err;
-                       goto out;
-               }
+               err = ext3_dx_readdir(file, ctx);
+               if (err != ERR_BAD_DX_DIR)
+                       return err;
                /*
                 * We don't set the inode dirty flag since it's not
                 * critical that it get flushed back to the disk.
                 */
-               EXT3_I(file_inode(filp))->i_flags &= ~EXT3_INDEX_FL;
+               EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
        }
-       stored = 0;
-       offset = filp->f_pos & (sb->s_blocksize - 1);
+       offset = ctx->pos & (sb->s_blocksize - 1);
 
-       while (!error && !stored && filp->f_pos < inode->i_size) {
-               unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb);
+       while (ctx->pos < inode->i_size) {
+               unsigned long blk = ctx->pos >> EXT3_BLOCK_SIZE_BITS(sb);
                struct buffer_head map_bh;
                struct buffer_head *bh = NULL;
 
@@ -129,12 +122,12 @@ static int ext3_readdir(struct file * filp,
                if (err > 0) {
                        pgoff_t index = map_bh.b_blocknr >>
                                        (PAGE_CACHE_SHIFT - inode->i_blkbits);
-                       if (!ra_has_index(&filp->f_ra, index))
+                       if (!ra_has_index(&file->f_ra, index))
                                page_cache_sync_readahead(
                                        sb->s_bdev->bd_inode->i_mapping,
-                                       &filp->f_ra, filp,
+                                       &file->f_ra, file,
                                        index, 1);
-                       filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+                       file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
                        bh = ext3_bread(NULL, inode, blk, 0, &err);
                }
 
@@ -146,22 +139,21 @@ static int ext3_readdir(struct file * filp,
                        if (!dir_has_error) {
                                ext3_error(sb, __func__, "directory #%lu "
                                        "contains a hole at offset %lld",
-                                       inode->i_ino, filp->f_pos);
+                                       inode->i_ino, ctx->pos);
                                dir_has_error = 1;
                        }
                        /* corrupt size?  Maybe no more blocks to read */
-                       if (filp->f_pos > inode->i_blocks << 9)
+                       if (ctx->pos > inode->i_blocks << 9)
                                break;
-                       filp->f_pos += sb->s_blocksize - offset;
+                       ctx->pos += sb->s_blocksize - offset;
                        continue;
                }
 
-revalidate:
                /* If the dir block has changed since the last call to
                 * readdir(2), then we might be pointing to an invalid
                 * dirent right now.  Scan from the start of the block
                 * to make sure. */
-               if (filp->f_version != inode->i_version) {
+               if (offset && file->f_version != inode->i_version) {
                        for (i = 0; i < sb->s_blocksize && i < offset; ) {
                                de = (struct ext3_dir_entry_2 *)
                                        (bh->b_data + i);
@@ -177,53 +169,40 @@ revalidate:
                                i += ext3_rec_len_from_disk(de->rec_len);
                        }
                        offset = i;
-                       filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+                       ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
                                | offset;
-                       filp->f_version = inode->i_version;
+                       file->f_version = inode->i_version;
                }
 
-               while (!error && filp->f_pos < inode->i_size
+               while (ctx->pos < inode->i_size
                       && offset < sb->s_blocksize) {
                        de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
                        if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
                                                   bh, offset)) {
-                               /* On error, skip the f_pos to the
+                               /* On error, skip to the
                                    next block. */
-                               filp->f_pos = (filp->f_pos |
+                               ctx->pos = (ctx->pos |
                                                (sb->s_blocksize - 1)) + 1;
-                               brelse (bh);
-                               ret = stored;
-                               goto out;
+                               break;
                        }
                        offset += ext3_rec_len_from_disk(de->rec_len);
                        if (le32_to_cpu(de->inode)) {
-                               /* We might block in the next section
-                                * if the data destination is
-                                * currently swapped out.  So, use a
-                                * version stamp to detect whether or
-                                * not the directory has been modified
-                                * during the copy operation.
-                                */
-                               u64 version = filp->f_version;
-
-                               error = filldir(dirent, de->name,
-                                               de->name_len,
-                                               filp->f_pos,
-                                               le32_to_cpu(de->inode),
-                                               get_dtype(sb, de->file_type));
-                               if (error)
-                                       break;
-                               if (version != filp->f_version)
-                                       goto revalidate;
-                               stored ++;
+                               if (!dir_emit(ctx, de->name, de->name_len,
+                                             le32_to_cpu(de->inode),
+                                             get_dtype(sb, de->file_type))) {
+                                       brelse(bh);
+                                       return 0;
+                               }
                        }
-                       filp->f_pos += ext3_rec_len_from_disk(de->rec_len);
+                       ctx->pos += ext3_rec_len_from_disk(de->rec_len);
                }
                offset = 0;
                brelse (bh);
+               if (ctx->pos < inode->i_size)
+                       if (!dir_relax(inode))
+                               return 0;
        }
-out:
-       return ret;
+       return 0;
 }
 
 static inline int is_32bit_api(void)
@@ -452,62 +431,54 @@ int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
 * for all entries on the fname linked list.  (Normally there is only
  * one entry on the linked list, unless there are 62 bit hash collisions.)
  */
-static int call_filldir(struct file * filp, void * dirent,
-                       filldir_t filldir, struct fname *fname)
+static bool call_filldir(struct file *file, struct dir_context *ctx,
+                       struct fname *fname)
 {
-       struct dir_private_info *info = filp->private_data;
-       loff_t  curr_pos;
-       struct inode *inode = file_inode(filp);
-       struct super_block * sb;
-       int error;
-
-       sb = inode->i_sb;
+       struct dir_private_info *info = file->private_data;
+       struct inode *inode = file_inode(file);
+       struct super_block *sb = inode->i_sb;
 
        if (!fname) {
                printk("call_filldir: called with null fname?!?\n");
-               return 0;
+               return true;
        }
-       curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+       ctx->pos = hash2pos(file, fname->hash, fname->minor_hash);
        while (fname) {
-               error = filldir(dirent, fname->name,
-                               fname->name_len, curr_pos,
+               if (!dir_emit(ctx, fname->name, fname->name_len,
                                fname->inode,
-                               get_dtype(sb, fname->file_type));
-               if (error) {
-                       filp->f_pos = curr_pos;
+                               get_dtype(sb, fname->file_type))) {
                        info->extra_fname = fname;
-                       return error;
+                       return false;
                }
                fname = fname->next;
        }
-       return 0;
+       return true;
 }
 
-static int ext3_dx_readdir(struct file * filp,
-                        void * dirent, filldir_t filldir)
+static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dir_private_info *info = filp->private_data;
-       struct inode *inode = file_inode(filp);
+       struct dir_private_info *info = file->private_data;
+       struct inode *inode = file_inode(file);
        struct fname *fname;
        int     ret;
 
        if (!info) {
-               info = ext3_htree_create_dir_info(filp, filp->f_pos);
+               info = ext3_htree_create_dir_info(file, ctx->pos);
                if (!info)
                        return -ENOMEM;
-               filp->private_data = info;
+               file->private_data = info;
        }
 
-       if (filp->f_pos == ext3_get_htree_eof(filp))
+       if (ctx->pos == ext3_get_htree_eof(file))
                return 0;       /* EOF */
 
        /* Someone has messed with f_pos; reset the world */
-       if (info->last_pos != filp->f_pos) {
+       if (info->last_pos != ctx->pos) {
                free_rb_tree_fname(&info->root);
                info->curr_node = NULL;
                info->extra_fname = NULL;
-               info->curr_hash = pos2maj_hash(filp, filp->f_pos);
-               info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+               info->curr_hash = pos2maj_hash(file, ctx->pos);
+               info->curr_minor_hash = pos2min_hash(file, ctx->pos);
        }
 
        /*
@@ -515,7 +486,7 @@ static int ext3_dx_readdir(struct file * filp,
         * chain, return them first.
         */
        if (info->extra_fname) {
-               if (call_filldir(filp, dirent, filldir, info->extra_fname))
+               if (!call_filldir(file, ctx, info->extra_fname))
                        goto finished;
                info->extra_fname = NULL;
                goto next_node;
@@ -529,17 +500,17 @@ static int ext3_dx_readdir(struct file * filp,
                 * cached entries.
                 */
                if ((!info->curr_node) ||
-                   (filp->f_version != inode->i_version)) {
+                   (file->f_version != inode->i_version)) {
                        info->curr_node = NULL;
                        free_rb_tree_fname(&info->root);
-                       filp->f_version = inode->i_version;
-                       ret = ext3_htree_fill_tree(filp, info->curr_hash,
+                       file->f_version = inode->i_version;
+                       ret = ext3_htree_fill_tree(file, info->curr_hash,
                                                   info->curr_minor_hash,
                                                   &info->next_hash);
                        if (ret < 0)
                                return ret;
                        if (ret == 0) {
-                               filp->f_pos = ext3_get_htree_eof(filp);
+                               ctx->pos = ext3_get_htree_eof(file);
                                break;
                        }
                        info->curr_node = rb_first(&info->root);
@@ -548,7 +519,7 @@ static int ext3_dx_readdir(struct file * filp,
                fname = rb_entry(info->curr_node, struct fname, rb_hash);
                info->curr_hash = fname->hash;
                info->curr_minor_hash = fname->minor_hash;
-               if (call_filldir(filp, dirent, filldir, fname))
+               if (!call_filldir(file, ctx, fname))
                        break;
        next_node:
                info->curr_node = rb_next(info->curr_node);
@@ -559,7 +530,7 @@ static int ext3_dx_readdir(struct file * filp,
                        info->curr_minor_hash = fname->minor_hash;
                } else {
                        if (info->next_hash == ~0) {
-                               filp->f_pos = ext3_get_htree_eof(filp);
+                               ctx->pos = ext3_get_htree_eof(file);
                                break;
                        }
                        info->curr_hash = info->next_hash;
@@ -567,7 +538,7 @@ static int ext3_dx_readdir(struct file * filp,
                }
        }
 finished:
-       info->last_pos = filp->f_pos;
+       info->last_pos = ctx->pos;
        return 0;
 }
 
@@ -582,7 +553,7 @@ static int ext3_release_dir (struct inode * inode, struct file * filp)
 const struct file_operations ext3_dir_operations = {
        .llseek         = ext3_dir_llseek,
        .read           = generic_read_dir,
-       .readdir        = ext3_readdir,
+       .iterate        = ext3_readdir,
        .unlocked_ioctl = ext3_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext3_compat_ioctl,
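
For context on the interface this series converts ext3 (and, below, ext4) to: ->readdir(file, dirent, filldir) is replaced by ->iterate(file, ctx), the filldir callback by dir_emit(), and direct f_pos manipulation by ctx->pos. A minimal sketch of an ->iterate() method under the new API follows; the "examplefs" names and the single fixed entry are hypothetical illustrations, not taken from this patch.

#include <linux/fs.h>

/* Hypothetical example, not part of this patch. */
static int examplefs_iterate(struct file *file, struct dir_context *ctx)
{
	/* Emits "." and ".." and advances ctx->pos past them. */
	if (!dir_emit_dots(file, ctx))
		return 0;

	/* One hypothetical entry at position 2. */
	if (ctx->pos == 2) {
		/* dir_emit() returns false when the caller's buffer is full;
		 * leave ctx->pos untouched so the next call resumes here. */
		if (!dir_emit(ctx, "hello", 5, 42 /* ino */, DT_REG))
			return 0;
		ctx->pos++;
	}
	return 0;
}

static const struct file_operations examplefs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= examplefs_iterate,
};
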
index 23c712825640926988883eedb7eefaf69d9c3250..f67668f724baaf4112e994ded2e55a4c62089c3a 100644 (file)
@@ -1825,19 +1825,20 @@ ext3_readpages(struct file *file, struct address_space *mapping,
        return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
 }
 
-static void ext3_invalidatepage(struct page *page, unsigned long offset)
+static void ext3_invalidatepage(struct page *page, unsigned int offset,
+                               unsigned int length)
 {
        journal_t *journal = EXT3_JOURNAL(page->mapping->host);
 
-       trace_ext3_invalidatepage(page, offset);
+       trace_ext3_invalidatepage(page, offset, length);
 
        /*
         * If it's a full truncate we just forget about the pending dirtying
         */
-       if (offset == 0)
+       if (offset == 0 && length == PAGE_CACHE_SIZE)
                ClearPageChecked(page);
 
-       journal_invalidatepage(journal, page, offset);
+       journal_invalidatepage(journal, page, offset, length);
 }
 
 static int ext3_releasepage(struct page *page, gfp_t wait)
index 692de13e35963a86dbbbab1cb246a3f2b74ba202..cea8ecf3e76e47efb977127a95cb7b70a9d5f377 100644 (file)
@@ -576,11 +576,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
                if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
                                        (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
                                                +((char *)de - bh->b_data))) {
-                       /* On error, skip the f_pos to the next block. */
-                       dir_file->f_pos = (dir_file->f_pos |
-                                       (dir->i_sb->s_blocksize - 1)) + 1;
-                       brelse (bh);
-                       return count;
+                       /* silently ignore the rest of the block */
+                       break;
                }
                ext3fs_dirhash(de->name, de->name_len, hinfo);
                if ((hinfo->hash < start_hash) ||
index d0f13eada0ed5799295a5378642cd3786cf32595..58339393fa6e55c689adfa375b804a108a25c924 100644 (file)
@@ -682,11 +682,15 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
 
 static inline int test_root(ext4_group_t a, int b)
 {
-       int num = b;
-
-       while (a > num)
-               num *= b;
-       return num == a;
+       while (1) {
+               if (a < b)
+                       return 0;
+               if (a == b)
+                       return 1;
+               if ((a % b) != 0)
+                       return 0;
+               a = a / b;
+       }
 }
 
 static int ext4_group_sparse(ext4_group_t group)
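
The test_root() rewrite above switches from repeated multiplication, which could overflow int for large group numbers, to repeated division: it returns 1 exactly when group number a is b, b^2, b^3, and so on. A small stand-alone sketch of the same logic, runnable in userspace:

#include <stdio.h>

/* Same logic as the new test_root(): returns 1 iff a is a positive power
 * of b (b >= 2), using only division so it cannot overflow. */
static int test_root(unsigned int a, unsigned int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

int main(void)
{
	/* ext4_group_sparse()-style checks: powers of 3, 5 and 7 carry
	 * sparse superblock backups. */
	printf("%d %d %d %d\n",
	       test_root(49, 7),	/* 1: 49 = 7^2  */
	       test_root(50, 7),	/* 0            */
	       test_root(243, 3),	/* 1: 243 = 3^5 */
	       test_root(125, 5));	/* 1: 125 = 5^3 */
	return 0;
}
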
index f8d56e4254e05b2866c98e7f6674b71109669cfb..3c7d288ae94ce5ee5a83f66bb1df93df6a986db8 100644 (file)
@@ -29,8 +29,7 @@
 #include "ext4.h"
 #include "xattr.h"
 
-static int ext4_dx_readdir(struct file *filp,
-                          void *dirent, filldir_t filldir);
+static int ext4_dx_readdir(struct file *, struct dir_context *);
 
 /**
  * Check if the given dir-inode refers to an htree-indexed directory
@@ -103,60 +102,56 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
        return 1;
 }
 
-static int ext4_readdir(struct file *filp,
-                        void *dirent, filldir_t filldir)
+static int ext4_readdir(struct file *file, struct dir_context *ctx)
 {
-       int error = 0;
        unsigned int offset;
        int i, stored;
        struct ext4_dir_entry_2 *de;
        int err;
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
-       int ret = 0;
        int dir_has_error = 0;
 
        if (is_dx_dir(inode)) {
-               err = ext4_dx_readdir(filp, dirent, filldir);
+               err = ext4_dx_readdir(file, ctx);
                if (err != ERR_BAD_DX_DIR) {
-                       ret = err;
-                       goto out;
+                       return err;
                }
                /*
                 * We don't set the inode dirty flag since it's not
                 * critical that it get flushed back to the disk.
                 */
-               ext4_clear_inode_flag(file_inode(filp),
+               ext4_clear_inode_flag(file_inode(file),
                                      EXT4_INODE_INDEX);
        }
 
        if (ext4_has_inline_data(inode)) {
                int has_inline_data = 1;
-               ret = ext4_read_inline_dir(filp, dirent, filldir,
+               int ret = ext4_read_inline_dir(file, ctx,
                                           &has_inline_data);
                if (has_inline_data)
                        return ret;
        }
 
        stored = 0;
-       offset = filp->f_pos & (sb->s_blocksize - 1);
+       offset = ctx->pos & (sb->s_blocksize - 1);
 
-       while (!error && !stored && filp->f_pos < inode->i_size) {
+       while (ctx->pos < inode->i_size) {
                struct ext4_map_blocks map;
                struct buffer_head *bh = NULL;
 
-               map.m_lblk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb);
+               map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
                map.m_len = 1;
                err = ext4_map_blocks(NULL, inode, &map, 0);
                if (err > 0) {
                        pgoff_t index = map.m_pblk >>
                                        (PAGE_CACHE_SHIFT - inode->i_blkbits);
-                       if (!ra_has_index(&filp->f_ra, index))
+                       if (!ra_has_index(&file->f_ra, index))
                                page_cache_sync_readahead(
                                        sb->s_bdev->bd_inode->i_mapping,
-                                       &filp->f_ra, filp,
+                                       &file->f_ra, file,
                                        index, 1);
-                       filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+                       file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
                        bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err);
                }
 
@@ -166,16 +161,16 @@ static int ext4_readdir(struct file *filp,
                 */
                if (!bh) {
                        if (!dir_has_error) {
-                               EXT4_ERROR_FILE(filp, 0,
+                               EXT4_ERROR_FILE(file, 0,
                                                "directory contains a "
                                                "hole at offset %llu",
-                                          (unsigned long long) filp->f_pos);
+                                          (unsigned long long) ctx->pos);
                                dir_has_error = 1;
                        }
                        /* corrupt size?  Maybe no more blocks to read */
-                       if (filp->f_pos > inode->i_blocks << 9)
+                       if (ctx->pos > inode->i_blocks << 9)
                                break;
-                       filp->f_pos += sb->s_blocksize - offset;
+                       ctx->pos += sb->s_blocksize - offset;
                        continue;
                }
 
@@ -183,21 +178,20 @@ static int ext4_readdir(struct file *filp,
                if (!buffer_verified(bh) &&
                    !ext4_dirent_csum_verify(inode,
                                (struct ext4_dir_entry *)bh->b_data)) {
-                       EXT4_ERROR_FILE(filp, 0, "directory fails checksum "
+                       EXT4_ERROR_FILE(file, 0, "directory fails checksum "
                                        "at offset %llu",
-                                       (unsigned long long)filp->f_pos);
-                       filp->f_pos += sb->s_blocksize - offset;
+                                       (unsigned long long)ctx->pos);
+                       ctx->pos += sb->s_blocksize - offset;
                        brelse(bh);
                        continue;
                }
                set_buffer_verified(bh);
 
-revalidate:
                /* If the dir block has changed since the last call to
                 * readdir(2), then we might be pointing to an invalid
                 * dirent right now.  Scan from the start of the block
                 * to make sure. */
-               if (filp->f_version != inode->i_version) {
+               if (file->f_version != inode->i_version) {
                        for (i = 0; i < sb->s_blocksize && i < offset; ) {
                                de = (struct ext4_dir_entry_2 *)
                                        (bh->b_data + i);
@@ -214,57 +208,46 @@ revalidate:
                                                            sb->s_blocksize);
                        }
                        offset = i;
-                       filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+                       ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
                                | offset;
-                       filp->f_version = inode->i_version;
+                       file->f_version = inode->i_version;
                }
 
-               while (!error && filp->f_pos < inode->i_size
+               while (ctx->pos < inode->i_size
                       && offset < sb->s_blocksize) {
                        de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
-                       if (ext4_check_dir_entry(inode, filp, de, bh,
+                       if (ext4_check_dir_entry(inode, file, de, bh,
                                                 bh->b_data, bh->b_size,
                                                 offset)) {
                                /*
-                                * On error, skip the f_pos to the next block
+                                * On error, skip to the next block
                                 */
-                               filp->f_pos = (filp->f_pos |
+                               ctx->pos = (ctx->pos |
                                                (sb->s_blocksize - 1)) + 1;
-                               brelse(bh);
-                               ret = stored;
-                               goto out;
+                               break;
                        }
                        offset += ext4_rec_len_from_disk(de->rec_len,
                                        sb->s_blocksize);
                        if (le32_to_cpu(de->inode)) {
-                               /* We might block in the next section
-                                * if the data destination is
-                                * currently swapped out.  So, use a
-                                * version stamp to detect whether or
-                                * not the directory has been modified
-                                * during the copy operation.
-                                */
-                               u64 version = filp->f_version;
-
-                               error = filldir(dirent, de->name,
+                               if (!dir_emit(ctx, de->name,
                                                de->name_len,
-                                               filp->f_pos,
                                                le32_to_cpu(de->inode),
-                                               get_dtype(sb, de->file_type));
-                               if (error)
-                                       break;
-                               if (version != filp->f_version)
-                                       goto revalidate;
-                               stored++;
+                                               get_dtype(sb, de->file_type))) {
+                                       brelse(bh);
+                                       return 0;
+                               }
                        }
-                       filp->f_pos += ext4_rec_len_from_disk(de->rec_len,
+                       ctx->pos += ext4_rec_len_from_disk(de->rec_len,
                                                sb->s_blocksize);
                }
                offset = 0;
                brelse(bh);
+               if (ctx->pos < inode->i_size) {
+                       if (!dir_relax(inode))
+                               return 0;
+               }
        }
-out:
-       return ret;
+       return 0;
 }
 
 static inline int is_32bit_api(void)
@@ -492,16 +475,12 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
 * for all entries on the fname linked list.  (Normally there is only
  * one entry on the linked list, unless there are 62 bit hash collisions.)
  */
-static int call_filldir(struct file *filp, void *dirent,
-                       filldir_t filldir, struct fname *fname)
+static int call_filldir(struct file *file, struct dir_context *ctx,
+                       struct fname *fname)
 {
-       struct dir_private_info *info = filp->private_data;
-       loff_t  curr_pos;
-       struct inode *inode = file_inode(filp);
-       struct super_block *sb;
-       int error;
-
-       sb = inode->i_sb;
+       struct dir_private_info *info = file->private_data;
+       struct inode *inode = file_inode(file);
+       struct super_block *sb = inode->i_sb;
 
        if (!fname) {
                ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: comm %s: "
@@ -509,47 +488,44 @@ static int call_filldir(struct file *filp, void *dirent,
                         inode->i_ino, current->comm);
                return 0;
        }
-       curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+       ctx->pos = hash2pos(file, fname->hash, fname->minor_hash);
        while (fname) {
-               error = filldir(dirent, fname->name,
-                               fname->name_len, curr_pos,
+               if (!dir_emit(ctx, fname->name,
+                               fname->name_len,
                                fname->inode,
-                               get_dtype(sb, fname->file_type));
-               if (error) {
-                       filp->f_pos = curr_pos;
+                               get_dtype(sb, fname->file_type))) {
                        info->extra_fname = fname;
-                       return error;
+                       return 1;
                }
                fname = fname->next;
        }
        return 0;
 }
 
-static int ext4_dx_readdir(struct file *filp,
-                        void *dirent, filldir_t filldir)
+static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dir_private_info *info = filp->private_data;
-       struct inode *inode = file_inode(filp);
+       struct dir_private_info *info = file->private_data;
+       struct inode *inode = file_inode(file);
        struct fname *fname;
        int     ret;
 
        if (!info) {
-               info = ext4_htree_create_dir_info(filp, filp->f_pos);
+               info = ext4_htree_create_dir_info(file, ctx->pos);
                if (!info)
                        return -ENOMEM;
-               filp->private_data = info;
+               file->private_data = info;
        }
 
-       if (filp->f_pos == ext4_get_htree_eof(filp))
+       if (ctx->pos == ext4_get_htree_eof(file))
                return 0;       /* EOF */
 
        /* Someone has messed with f_pos; reset the world */
-       if (info->last_pos != filp->f_pos) {
+       if (info->last_pos != ctx->pos) {
                free_rb_tree_fname(&info->root);
                info->curr_node = NULL;
                info->extra_fname = NULL;
-               info->curr_hash = pos2maj_hash(filp, filp->f_pos);
-               info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+               info->curr_hash = pos2maj_hash(file, ctx->pos);
+               info->curr_minor_hash = pos2min_hash(file, ctx->pos);
        }
 
        /*
@@ -557,7 +533,7 @@ static int ext4_dx_readdir(struct file *filp,
         * chain, return them first.
         */
        if (info->extra_fname) {
-               if (call_filldir(filp, dirent, filldir, info->extra_fname))
+               if (call_filldir(file, ctx, info->extra_fname))
                        goto finished;
                info->extra_fname = NULL;
                goto next_node;
@@ -571,17 +547,17 @@ static int ext4_dx_readdir(struct file *filp,
                 * cached entries.
                 */
                if ((!info->curr_node) ||
-                   (filp->f_version != inode->i_version)) {
+                   (file->f_version != inode->i_version)) {
                        info->curr_node = NULL;
                        free_rb_tree_fname(&info->root);
-                       filp->f_version = inode->i_version;
-                       ret = ext4_htree_fill_tree(filp, info->curr_hash,
+                       file->f_version = inode->i_version;
+                       ret = ext4_htree_fill_tree(file, info->curr_hash,
                                                   info->curr_minor_hash,
                                                   &info->next_hash);
                        if (ret < 0)
                                return ret;
                        if (ret == 0) {
-                               filp->f_pos = ext4_get_htree_eof(filp);
+                               ctx->pos = ext4_get_htree_eof(file);
                                break;
                        }
                        info->curr_node = rb_first(&info->root);
@@ -590,7 +566,7 @@ static int ext4_dx_readdir(struct file *filp,
                fname = rb_entry(info->curr_node, struct fname, rb_hash);
                info->curr_hash = fname->hash;
                info->curr_minor_hash = fname->minor_hash;
-               if (call_filldir(filp, dirent, filldir, fname))
+               if (call_filldir(file, ctx, fname))
                        break;
        next_node:
                info->curr_node = rb_next(info->curr_node);
@@ -601,7 +577,7 @@ static int ext4_dx_readdir(struct file *filp,
                        info->curr_minor_hash = fname->minor_hash;
                } else {
                        if (info->next_hash == ~0) {
-                               filp->f_pos = ext4_get_htree_eof(filp);
+                               ctx->pos = ext4_get_htree_eof(file);
                                break;
                        }
                        info->curr_hash = info->next_hash;
@@ -609,7 +585,7 @@ static int ext4_dx_readdir(struct file *filp,
                }
        }
 finished:
-       info->last_pos = filp->f_pos;
+       info->last_pos = ctx->pos;
        return 0;
 }
 
@@ -624,7 +600,7 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
 const struct file_operations ext4_dir_operations = {
        .llseek         = ext4_dir_llseek,
        .read           = generic_read_dir,
-       .readdir        = ext4_readdir,
+       .iterate        = ext4_readdir,
        .unlocked_ioctl = ext4_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
index 5aae3d12d4004109cff811b77011486abbaed1b5..b577e45425b0ac921ae1f0904b6ef84b7ace8fb3 100644 (file)
@@ -176,39 +176,29 @@ struct ext4_map_blocks {
        unsigned int m_flags;
 };
 
-/*
- * For delayed allocation tracking
- */
-struct mpage_da_data {
-       struct inode *inode;
-       sector_t b_blocknr;             /* start block number of extent */
-       size_t b_size;                  /* size of extent */
-       unsigned long b_state;          /* state of the extent */
-       unsigned long first_page, next_page;    /* extent of pages */
-       struct writeback_control *wbc;
-       int io_done;
-       int pages_written;
-       int retval;
-};
-
 /*
  * Flags for ext4_io_end->flags
  */
 #define        EXT4_IO_END_UNWRITTEN   0x0001
-#define EXT4_IO_END_ERROR      0x0002
-#define EXT4_IO_END_DIRECT     0x0004
+#define EXT4_IO_END_DIRECT     0x0002
 
 /*
- * For converting uninitialized extents on a work queue.
+ * For converting uninitialized extents on a work queue. 'handle' is used for
+ * buffered writeback.
  */
 typedef struct ext4_io_end {
        struct list_head        list;           /* per-file finished IO list */
+       handle_t                *handle;        /* handle reserved for extent
+                                                * conversion */
        struct inode            *inode;         /* file being written to */
+       struct bio              *bio;           /* Linked list of completed
+                                                * bios covering the extent */
        unsigned int            flag;           /* unwritten or not */
        loff_t                  offset;         /* offset in the file */
        ssize_t                 size;           /* size of the extent */
        struct kiocb            *iocb;          /* iocb struct for AIO */
        int                     result;         /* error value for AIO */
+       atomic_t                count;          /* reference counter */
 } ext4_io_end_t;
 
 struct ext4_io_submit {
@@ -580,11 +570,6 @@ enum {
 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER  0x0010
 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER   0x0020
 
-/*
- * Flags used by ext4_discard_partial_page_buffers
- */
-#define EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED  0x0001
-
 /*
  * ioctl commands
  */
@@ -879,6 +864,7 @@ struct ext4_inode_info {
        rwlock_t i_es_lock;
        struct list_head i_es_lru;
        unsigned int i_es_lru_nr;       /* protected by i_es_lock */
+       unsigned long i_touch_when;     /* jiffies of last access */
 
        /* ialloc */
        ext4_group_t    i_last_alloc_group;
@@ -903,12 +889,22 @@ struct ext4_inode_info {
        qsize_t i_reserved_quota;
 #endif
 
-       /* completed IOs that might need unwritten extents handling */
-       struct list_head i_completed_io_list;
+       /* Lock protecting lists below */
        spinlock_t i_completed_io_lock;
+       /*
+        * Completed IOs that need unwritten extents handling and have
+        * transaction reserved
+        */
+       struct list_head i_rsv_conversion_list;
+       /*
+        * Completed IOs that need unwritten extents handling and don't have
+        * transaction reserved
+        */
+       struct list_head i_unrsv_conversion_list;
        atomic_t i_ioend_count; /* Number of outstanding io_end structs */
        atomic_t i_unwritten; /* Nr. of inflight conversions pending */
-       struct work_struct i_unwritten_work;    /* deferred extent conversion */
+       struct work_struct i_rsv_conversion_work;
+       struct work_struct i_unrsv_conversion_work;
 
        spinlock_t i_block_reservation_lock;
 
@@ -1245,7 +1241,6 @@ struct ext4_sb_info {
        unsigned int s_mb_stats;
        unsigned int s_mb_order2_reqs;
        unsigned int s_mb_group_prealloc;
-       unsigned int s_max_writeback_mb_bump;
        unsigned int s_max_dir_size_kb;
        /* where last allocation was done - for stream allocation */
        unsigned long s_mb_last_group;
@@ -1281,8 +1276,10 @@ struct ext4_sb_info {
        struct flex_groups *s_flex_groups;
        ext4_group_t s_flex_groups_allocated;
 
-       /* workqueue for dio unwritten */
-       struct workqueue_struct *dio_unwritten_wq;
+       /* workqueue for unreserved extent conversions (dio) */
+       struct workqueue_struct *unrsv_conversion_wq;
+       /* workqueue for reserved extent conversions (buffered io) */
+       struct workqueue_struct *rsv_conversion_wq;
 
        /* timer for periodic error stats printing */
        struct timer_list s_err_report;
@@ -1307,6 +1304,7 @@ struct ext4_sb_info {
        /* Reclaim extents from extent status tree */
        struct shrinker s_es_shrinker;
        struct list_head s_es_lru;
+       unsigned long s_es_last_sorted;
        struct percpu_counter s_extent_cache_cnt;
        spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
 };
@@ -1342,6 +1340,9 @@ static inline void ext4_set_io_unwritten_flag(struct inode *inode,
                                              struct ext4_io_end *io_end)
 {
        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+               /* Writeback has to have conversion transaction reserved */
+               WARN_ON(EXT4_SB(inode->i_sb)->s_journal && !io_end->handle &&
+                       !(io_end->flag & EXT4_IO_END_DIRECT));
                io_end->flag |= EXT4_IO_END_UNWRITTEN;
                atomic_inc(&EXT4_I(inode)->i_unwritten);
        }
@@ -1999,7 +2000,6 @@ static inline  unsigned char get_dtype(struct super_block *sb, int filetype)
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
-extern int ext4_flush_unwritten_io(struct inode *);
 
 /* hash.c */
 extern int ext4fs_dirhash(const char *name, int len, struct
@@ -2088,7 +2088,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int);
 extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
 extern int ext4_can_truncate(struct inode *inode);
 extern void ext4_truncate(struct inode *);
-extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
+extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
 extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
 extern void ext4_set_inode_flags(struct inode *);
 extern void ext4_get_inode_flags(struct ext4_inode_info *);
@@ -2096,9 +2096,12 @@ extern int ext4_alloc_da_blocks(struct inode *inode);
 extern void ext4_set_aops(struct inode *inode);
 extern int ext4_writepage_trans_blocks(struct inode *);
 extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
-extern int ext4_discard_partial_page_buffers(handle_t *handle,
-               struct address_space *mapping, loff_t from,
-               loff_t length, int flags);
+extern int ext4_block_truncate_page(handle_t *handle,
+               struct address_space *mapping, loff_t from);
+extern int ext4_block_zero_page_range(handle_t *handle,
+               struct address_space *mapping, loff_t from, loff_t length);
+extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
+                            loff_t lstart, loff_t lend);
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern void ext4_da_update_reserve_space(struct inode *inode,
@@ -2111,7 +2114,7 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
                                const struct iovec *iov, loff_t offset,
                                unsigned long nr_segs);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
-extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk);
+extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
 extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
                                 ext4_lblk_t first, ext4_lblk_t stop);
@@ -2166,42 +2169,96 @@ extern int ext4_alloc_flex_bg_array(struct super_block *sb,
                                    ext4_group_t ngroup);
 extern const char *ext4_decode_error(struct super_block *sb, int errno,
                                     char nbuf[16]);
+
 extern __printf(4, 5)
 void __ext4_error(struct super_block *, const char *, unsigned int,
                  const char *, ...);
-#define ext4_error(sb, message...)     __ext4_error(sb, __func__,      \
-                                                    __LINE__, ## message)
 extern __printf(5, 6)
-void ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t,
+void __ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t,
                      const char *, ...);
 extern __printf(5, 6)
-void ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t,
+void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t,
                     const char *, ...);
 extern void __ext4_std_error(struct super_block *, const char *,
                             unsigned int, int);
 extern __printf(4, 5)
 void __ext4_abort(struct super_block *, const char *, unsigned int,
                  const char *, ...);
-#define ext4_abort(sb, message...)     __ext4_abort(sb, __func__, \
-                                                      __LINE__, ## message)
 extern __printf(4, 5)
 void __ext4_warning(struct super_block *, const char *, unsigned int,
                    const char *, ...);
-#define ext4_warning(sb, message...)   __ext4_warning(sb, __func__, \
-                                                      __LINE__, ## message)
 extern __printf(3, 4)
-void ext4_msg(struct super_block *, const char *, const char *, ...);
+void __ext4_msg(struct super_block *, const char *, const char *, ...);
 extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
                           const char *, unsigned int, const char *);
-#define dump_mmp_msg(sb, mmp, msg)     __dump_mmp_msg(sb, mmp, __func__, \
-                                                      __LINE__, msg)
 extern __printf(7, 8)
 void __ext4_grp_locked_error(const char *, unsigned int,
                             struct super_block *, ext4_group_t,
                             unsigned long, ext4_fsblk_t,
                             const char *, ...);
-#define ext4_grp_locked_error(sb, grp, message...) \
-       __ext4_grp_locked_error(__func__, __LINE__, (sb), (grp), ## message)
+
+#ifdef CONFIG_PRINTK
+
+#define ext4_error_inode(inode, func, line, block, fmt, ...)           \
+       __ext4_error_inode(inode, func, line, block, fmt, ##__VA_ARGS__)
+#define ext4_error_file(file, func, line, block, fmt, ...)             \
+       __ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__)
+#define ext4_error(sb, fmt, ...)                                       \
+       __ext4_error(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
+#define ext4_abort(sb, fmt, ...)                                       \
+       __ext4_abort(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
+#define ext4_warning(sb, fmt, ...)                                     \
+       __ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
+#define ext4_msg(sb, level, fmt, ...)                          \
+       __ext4_msg(sb, level, fmt, ##__VA_ARGS__)
+#define dump_mmp_msg(sb, mmp, msg)                                     \
+       __dump_mmp_msg(sb, mmp, __func__, __LINE__, msg)
+#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...)           \
+       __ext4_grp_locked_error(__func__, __LINE__, sb, grp, ino, block, \
+                               fmt, ##__VA_ARGS__)
+
+#else
+
+#define ext4_error_inode(inode, func, line, block, fmt, ...)           \
+do {                                                                   \
+       no_printk(fmt, ##__VA_ARGS__);                                  \
+       __ext4_error_inode(inode, "", 0, block, " ");                   \
+} while (0)
+#define ext4_error_file(file, func, line, block, fmt, ...)             \
+do {                                                                   \
+       no_printk(fmt, ##__VA_ARGS__);                                  \
+       __ext4_error_file(file, "", 0, block, " ");                     \
+} while (0)
+#define ext4_error(sb, fmt, ...)                                       \
+do {                                                                   \
+       no_printk(fmt, ##__VA_ARGS__);                                  \
+       __ext4_error(sb, "", 0, " ");                                   \
+} while (0)
+#define ext4_abort(sb, fmt, ...)                                       \
+do {                                                                   \
+       no_printk(fmt, ##__VA_ARGS__);                                  \
+       __ext4_abort(sb, "", 0, " ");                                   \
+} while (0)
+#define ext4_warning(sb, fmt, ...)                                     \
+do {                                                                   \
+       no_printk(fmt, ##__VA_ARGS__);                                  \
+       __ext4_warning(sb, "", 0, " ");                                 \
+} while (0)
+#define ext4_msg(sb, level, fmt, ...)                                  \
+do {                                                                   \
+       no_printk(fmt, ##__VA_ARGS__);                                  \
+       __ext4_msg(sb, "", " ");                                        \
+} while (0)
+#define dump_mmp_msg(sb, mmp, msg)                                     \
+       __dump_mmp_msg(sb, mmp, "", 0, "")
+#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...)           \
+do {                                                                   \
+       no_printk(fmt, ##__VA_ARGS__);                          \
+       __ext4_grp_locked_error("", 0, sb, grp, ino, block, " ");       \
+} while (0)
+
+#endif
+
 extern void ext4_update_dynamic_rev(struct super_block *sb);
 extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
                                        __u32 compat);
@@ -2312,6 +2369,7 @@ struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
 {
         struct ext4_group_info ***grp_info;
         long indexv, indexh;
+        BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
         grp_info = EXT4_SB(sb)->s_group_info;
         indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
         indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
@@ -2515,7 +2573,7 @@ extern int ext4_try_create_inline_dir(handle_t *handle,
                                      struct inode *parent,
                                      struct inode *inode);
 extern int ext4_read_inline_dir(struct file *filp,
-                               void *dirent, filldir_t filldir,
+                               struct dir_context *ctx,
                                int *has_inline_data);
 extern int htree_inlinedir_to_tree(struct file *dir_file,
                                   struct inode *dir, ext4_lblk_t block,
@@ -2598,8 +2656,7 @@ struct ext4_extent;
 
 extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
 extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
-extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
-                                      int chunk);
+extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
 extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                               struct ext4_map_blocks *map, int flags);
 extern void ext4_ext_truncate(handle_t *, struct inode *);
@@ -2609,8 +2666,8 @@ extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
 extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
                          loff_t len);
-extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
-                         ssize_t len);
+extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
+                                         loff_t offset, ssize_t len);
 extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
                           struct ext4_map_blocks *map, int flags);
 extern int ext4_ext_calc_metadata_amount(struct inode *inode,
@@ -2650,12 +2707,15 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 
 /* page-io.c */
 extern int __init ext4_init_pageio(void);
-extern void ext4_add_complete_io(ext4_io_end_t *io_end);
 extern void ext4_exit_pageio(void);
-extern void ext4_ioend_shutdown(struct inode *);
-extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
-extern void ext4_end_io_work(struct work_struct *work);
+extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end);
+extern int ext4_put_io_end(ext4_io_end_t *io_end);
+extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
+extern void ext4_io_submit_init(struct ext4_io_submit *io,
+                               struct writeback_control *wbc);
+extern void ext4_end_io_rsv_work(struct work_struct *work);
+extern void ext4_end_io_unrsv_work(struct work_struct *work);
 extern void ext4_io_submit(struct ext4_io_submit *io);
 extern int ext4_bio_write_page(struct ext4_io_submit *io,
                               struct page *page,
@@ -2668,20 +2728,17 @@ extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp);
 extern int ext4_mmp_csum_verify(struct super_block *sb,
                                struct mmp_struct *mmp);
 
-/* BH_Uninit flag: blocks are allocated but uninitialized on disk */
+/*
+ * Note that these flags will never ever appear in a buffer_head's state flag.
+ * See EXT4_MAP_... to see where this is used.
+ */
 enum ext4_state_bits {
        BH_Uninit       /* blocks are allocated but uninitialized on disk */
-         = BH_JBDPrivateStart,
+        = BH_JBDPrivateStart,
        BH_AllocFromCluster,    /* allocated blocks were part of already
-                                * allocated cluster. Note that this flag will
-                                * never, ever appear in a buffer_head's state
-                                * flag. See EXT4_MAP_FROM_CLUSTER to see where
-                                * this is used. */
+                                * allocated cluster. */
 };
 
-BUFFER_FNS(Uninit, uninit)
-TAS_BUFFER_FNS(Uninit, uninit)
-
 /*
  * Add new method to test whether block and inode bitmaps are properly
  * initialized. With uninit_bg reading the block from disk is not enough
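
The io_end structure above gains a reference count, and the page-io declarations earlier in this hunk add the helpers that manage it (ext4_get_io_end(), ext4_put_io_end(), ext4_put_io_end_defer()). A hedged sketch of the lifecycle those declarations imply; the function, bio and inode here are placeholders, not the actual call sites in fs/ext4/page-io.c:

/* Placeholder illustration only. */
static int example_submit(struct inode *inode, struct bio *bio)
{
	ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);

	if (!io_end)
		return -ENOMEM;
	/* One reference travels with the bio and is dropped, possibly from
	 * interrupt context, via ext4_put_io_end_defer() in bi_end_io. */
	bio->bi_private = ext4_get_io_end(io_end);
	submit_bio(WRITE, bio);
	/* Drop the submitter's reference; the last put performs or queues
	 * the unwritten-extent conversion. */
	ext4_put_io_end(io_end);
	return 0;
}
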
index 451eb404533095fc323218fe5ea0f3caa0af8b16..72a3600aedbdffe48b5f2756bb0ad863f120a7b8 100644 (file)
@@ -38,31 +38,43 @@ static void ext4_put_nojournal(handle_t *handle)
 /*
  * Wrappers for jbd2_journal_start/end.
  */
-handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
-                                 int type, int nblocks)
+static int ext4_journal_check_start(struct super_block *sb)
 {
        journal_t *journal;
 
        might_sleep();
-
-       trace_ext4_journal_start(sb, nblocks, _RET_IP_);
        if (sb->s_flags & MS_RDONLY)
-               return ERR_PTR(-EROFS);
-
+               return -EROFS;
        WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
        journal = EXT4_SB(sb)->s_journal;
-       if (!journal)
-               return ext4_get_nojournal();
        /*
         * Special case here: if the journal has aborted behind our
         * backs (eg. EIO in the commit thread), then we still need to
         * take the FS itself readonly cleanly.
         */
-       if (is_journal_aborted(journal)) {
+       if (journal && is_journal_aborted(journal)) {
                ext4_abort(sb, "Detected aborted journal");
-               return ERR_PTR(-EROFS);
+               return -EROFS;
        }
-       return jbd2__journal_start(journal, nblocks, GFP_NOFS, type, line);
+       return 0;
+}
+
+handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
+                                 int type, int blocks, int rsv_blocks)
+{
+       journal_t *journal;
+       int err;
+
+       trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_);
+       err = ext4_journal_check_start(sb);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       journal = EXT4_SB(sb)->s_journal;
+       if (!journal)
+               return ext4_get_nojournal();
+       return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS,
+                                  type, line);
 }
 
 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
@@ -86,6 +98,30 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
        return err;
 }
 
+handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
+                                       int type)
+{
+       struct super_block *sb;
+       int err;
+
+       if (!ext4_handle_valid(handle))
+               return ext4_get_nojournal();
+
+       sb = handle->h_journal->j_private;
+       trace_ext4_journal_start_reserved(sb, handle->h_buffer_credits,
+                                         _RET_IP_);
+       err = ext4_journal_check_start(sb);
+       if (err < 0) {
+               jbd2_journal_free_reserved(handle);
+               return ERR_PTR(err);
+       }
+
+       err = jbd2_journal_start_reserved(handle, type, line);
+       if (err < 0)
+               return ERR_PTR(err);
+       return handle;
+}
+
 void ext4_journal_abort_handle(const char *caller, unsigned int line,
                               const char *err_fn, struct buffer_head *bh,
                               handle_t *handle, int err)
index c8c6885406db16cd7e273a2fc3f7e37ccd27fa6c..2877258d94977d976f7497ef17bfd69ad0f2b781 100644 (file)
@@ -134,7 +134,8 @@ static inline int ext4_jbd2_credits_xattr(struct inode *inode)
 #define EXT4_HT_MIGRATE          8
 #define EXT4_HT_MOVE_EXTENTS     9
 #define EXT4_HT_XATTR           10
-#define EXT4_HT_MAX             11
+#define EXT4_HT_EXT_CONVERT     11
+#define EXT4_HT_MAX             12
 
 /**
  *   struct ext4_journal_cb_entry - Base structure for callback information.
@@ -265,7 +266,7 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
        __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
 
 handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
-                                 int type, int nblocks);
+                                 int type, int blocks, int rsv_blocks);
 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
 
 #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
@@ -300,21 +301,37 @@ static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
 }
 
 #define ext4_journal_start_sb(sb, type, nblocks)                       \
-       __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks))
+       __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0)
 
 #define ext4_journal_start(inode, type, nblocks)                       \
-       __ext4_journal_start((inode), __LINE__, (type), (nblocks))
+       __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0)
+
+#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks) \
+       __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks))
 
 static inline handle_t *__ext4_journal_start(struct inode *inode,
                                             unsigned int line, int type,
-                                            int nblocks)
+                                            int blocks, int rsv_blocks)
 {
-       return __ext4_journal_start_sb(inode->i_sb, line, type, nblocks);
+       return __ext4_journal_start_sb(inode->i_sb, line, type, blocks,
+                                      rsv_blocks);
 }
 
 #define ext4_journal_stop(handle) \
        __ext4_journal_stop(__func__, __LINE__, (handle))
 
+#define ext4_journal_start_reserved(handle, type) \
+       __ext4_journal_start_reserved((handle), __LINE__, (type))
+
+handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
+                                       int type);
+
+static inline void ext4_journal_free_reserved(handle_t *handle)
+{
+       if (ext4_handle_valid(handle))
+               jbd2_journal_free_reserved(handle);
+}
+
 static inline handle_t *ext4_journal_current_handle(void)
 {
        return journal_current_handle();
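
The reserved-handle machinery added here (the rsv_blocks argument to __ext4_journal_start_sb(), ext4_journal_start_with_reserve(), ext4_journal_start_reserved(), and the new EXT4_HT_EXT_CONVERT handle type) lets buffered writeback reserve the credits for the later unwritten-extent conversion up front. A hedged sketch of the intended calling pattern; needed_blocks, rsv_blocks, offset and size are placeholders, and the real call sites live elsewhere in this series:

	/* Writeback side: start a handle and reserve conversion credits. */
	handle = ext4_journal_start_with_reserve(inode, EXT4_HT_WRITE_PAGE,
						 needed_blocks, rsv_blocks);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... write the pages; the reserved handle is parked in io_end->handle ... */

	/* Conversion side (ext4_end_io_rsv_work): restart the reserved handle. */
	handle = ext4_journal_start_reserved(io_end->handle, EXT4_HT_EXT_CONVERT);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	err = ext4_convert_unwritten_extents(handle, inode, offset, size);
	ext4_journal_stop(handle);
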
index bc0f1910b9cfa7dd5aa93b9c5d889c6fbacd037d..7097b0f680e62bbf674364008ef66887a5e0fdca 100644 (file)
@@ -2125,7 +2125,8 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
                next_del = ext4_find_delayed_extent(inode, &es);
                if (!exists && next_del) {
                        exists = 1;
-                       flags |= FIEMAP_EXTENT_DELALLOC;
+                       flags |= (FIEMAP_EXTENT_DELALLOC |
+                                 FIEMAP_EXTENT_UNKNOWN);
                }
                up_read(&EXT4_I(inode)->i_data_sem);
 
@@ -2328,17 +2329,15 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
 }
 
 /*
- * How many index/leaf blocks need to change/allocate to modify nrblocks?
+ * How many index/leaf blocks need to change/allocate to add @extents extents?
  *
- * if nrblocks are fit in a single extent (chunk flag is 1), then
- * in the worse case, each tree level index/leaf need to be changed
- * if the tree split due to insert a new extent, then the old tree
- * index/leaf need to be updated too
+ * If we add a single extent, then in the worst case, each tree level
+ * index/leaf needs to be changed if the tree splits.
  *
- * If the nrblocks are discontiguous, they could cause
- * the whole tree split more than once, but this is really rare.
+ * If more extents are inserted, they could cause the whole tree to split more
+ * than once, but this is really rare.
  */
-int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
 {
        int index;
        int depth;
@@ -2349,7 +2348,7 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
 
        depth = ext_depth(inode);
 
-       if (chunk)
+       if (extents <= 1)
                index = depth * 2;
        else
                index = depth * 3;
@@ -2357,20 +2356,24 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
        return index;
 }
 
+static inline int get_default_free_blocks_flags(struct inode *inode)
+{
+       if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+               return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
+       else if (ext4_should_journal_data(inode))
+               return EXT4_FREE_BLOCKS_FORGET;
+       return 0;
+}
+
 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
                              struct ext4_extent *ex,
-                             ext4_fsblk_t *partial_cluster,
+                             long long *partial_cluster,
                              ext4_lblk_t from, ext4_lblk_t to)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        unsigned short ee_len =  ext4_ext_get_actual_len(ex);
        ext4_fsblk_t pblk;
-       int flags = 0;
-
-       if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-               flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
-       else if (ext4_should_journal_data(inode))
-               flags |= EXT4_FREE_BLOCKS_FORGET;
+       int flags = get_default_free_blocks_flags(inode);
 
        /*
         * For bigalloc file systems, we never free a partial cluster
@@ -2388,7 +2391,8 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
         * partial cluster here.
         */
        pblk = ext4_ext_pblock(ex) + ee_len - 1;
-       if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
+       if ((*partial_cluster > 0) &&
+           (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
                ext4_free_blocks(handle, inode, NULL,
                                 EXT4_C2B(sbi, *partial_cluster),
                                 sbi->s_cluster_ratio, flags);
@@ -2414,41 +2418,46 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
            && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
                /* tail removal */
                ext4_lblk_t num;
+               unsigned int unaligned;
 
                num = le32_to_cpu(ex->ee_block) + ee_len - from;
                pblk = ext4_ext_pblock(ex) + ee_len - num;
-               ext_debug("free last %u blocks starting %llu\n", num, pblk);
+               /*
+                * Usually we want to free the partial cluster at the end of
+                * the extent, except when the cluster is still used by
+                * another extent (partial_cluster is negative).
+                */
+               if (*partial_cluster < 0 &&
+                   -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
+                       flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
+
+               ext_debug("free last %u blocks starting %llu partial %lld\n",
+                         num, pblk, *partial_cluster);
                ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
                /*
                 * If the block range to be freed didn't start at the
                 * beginning of a cluster, and we removed the entire
-                * extent, save the partial cluster here, since we
-                * might need to delete if we determine that the
-                * truncate operation has removed all of the blocks in
-                * the cluster.
+                * extent and the cluster is not used by any other extent,
+                * save the partial cluster here, since we might need to
+                * delete it if we determine that the truncate operation has
+                * removed all of the blocks in the cluster.
+                *
+                * On the other hand, if we did not manage to free the whole
+                * extent, we have to mark the cluster as used (store negative
+                * cluster number in partial_cluster).
                 */
-               if (pblk & (sbi->s_cluster_ratio - 1) &&
-                   (ee_len == num))
+               unaligned = pblk & (sbi->s_cluster_ratio - 1);
+               if (unaligned && (ee_len == num) &&
+                   (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
                        *partial_cluster = EXT4_B2C(sbi, pblk);
-               else
+               else if (unaligned)
+                       *partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
+               else if (*partial_cluster > 0)
                        *partial_cluster = 0;
-       } else if (from == le32_to_cpu(ex->ee_block)
-                  && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
-               /* head removal */
-               ext4_lblk_t num;
-               ext4_fsblk_t start;
-
-               num = to - from;
-               start = ext4_ext_pblock(ex);
-
-               ext_debug("free first %u blocks starting %llu\n", num, start);
-               ext4_free_blocks(handle, inode, NULL, start, num, flags);
-
-       } else {
-               printk(KERN_INFO "strange request: removal(2) "
-                               "%u-%u from %u:%u\n",
-                               from, to, le32_to_cpu(ex->ee_block), ee_len);
-       }
+       } else
+               ext4_error(sbi->s_sb, "strange request: removal(2) "
+                          "%u-%u from %u:%u\n",
+                          from, to, le32_to_cpu(ex->ee_block), ee_len);
        return 0;
 }
 
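
A minimal standalone sketch of the partial_cluster sign convention used above: a positive value records a cluster whose remaining blocks may still need freeing, a negative value records a cluster known to be in use by another extent, and zero means nothing is tracked. The helper names are hypothetical, not ext4 functions:

#include <stdio.h>

/* Hypothetical helpers illustrating the convention; not ext4 code. */
static void mark_cluster_pending_free(long long *partial_cluster,
				      unsigned long long cluster)
{
	*partial_cluster = (long long)cluster;		/* > 0: free later if unused */
}

static void mark_cluster_in_use(long long *partial_cluster,
				unsigned long long cluster)
{
	*partial_cluster = -(long long)cluster;		/* < 0: never free this one  */
}

int main(void)
{
	long long partial_cluster = 0;			/* 0: nothing tracked        */

	mark_cluster_pending_free(&partial_cluster, 42);
	printf("pending free: %lld\n", partial_cluster);

	mark_cluster_in_use(&partial_cluster, 42);
	printf("in use:       %lld\n", partial_cluster);
	return 0;
}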
@@ -2461,12 +2470,16 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
  * @handle: The journal handle
  * @inode:  The files inode
  * @path:   The path to the leaf
+ * @partial_cluster: The cluster which we'll have to free if all extents
+ *                   have been released from it. It becomes negative if
+ *                   the cluster is still in use.
  * @start:  The first block to remove
  * @end:   The last block to remove
  */
 static int
 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
-                struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
+                struct ext4_ext_path *path,
+                long long *partial_cluster,
                 ext4_lblk_t start, ext4_lblk_t end)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2479,6 +2492,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
        unsigned short ex_ee_len;
        unsigned uninitialized = 0;
        struct ext4_extent *ex;
+       ext4_fsblk_t pblk;
 
        /* the header must be checked already in ext4_ext_remove_space() */
        ext_debug("truncate since %u in leaf to %u\n", start, end);
@@ -2490,7 +2504,9 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                return -EIO;
        }
        /* find where to start removing */
-       ex = EXT_LAST_EXTENT(eh);
+       ex = path[depth].p_ext;
+       if (!ex)
+               ex = EXT_LAST_EXTENT(eh);
 
        ex_ee_block = le32_to_cpu(ex->ee_block);
        ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2517,6 +2533,16 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 
                /* If this extent is beyond the end of the hole, skip it */
                if (end < ex_ee_block) {
+                       /*
+                        * We're going to skip this extent and move to another,
+                        * so if this extent is not cluster-aligned we have
+                        * to mark the current cluster as used to avoid
+                        * accidentally freeing it later on.
+                        */
+                       pblk = ext4_ext_pblock(ex);
+                       if (pblk & (sbi->s_cluster_ratio - 1))
+                               *partial_cluster =
+                                       -((long long)EXT4_B2C(sbi, pblk));
                        ex--;
                        ex_ee_block = le32_to_cpu(ex->ee_block);
                        ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2592,7 +2618,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                                        sizeof(struct ext4_extent));
                        }
                        le16_add_cpu(&eh->eh_entries, -1);
-               } else
+               } else if (*partial_cluster > 0)
                        *partial_cluster = 0;
 
                err = ext4_ext_dirty(handle, inode, path + depth);
@@ -2610,17 +2636,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                err = ext4_ext_correct_indexes(handle, inode, path);
 
        /*
-        * If there is still a entry in the leaf node, check to see if
-        * it references the partial cluster.  This is the only place
-        * where it could; if it doesn't, we can free the cluster.
+        * Free the partial cluster only if the current extent does not
+        * reference it. Otherwise we might free a used cluster.
         */
-       if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
+       if (*partial_cluster > 0 &&
            (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
             *partial_cluster)) {
-               int flags = EXT4_FREE_BLOCKS_FORGET;
-
-               if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-                       flags |= EXT4_FREE_BLOCKS_METADATA;
+               int flags = get_default_free_blocks_flags(inode);
 
                ext4_free_blocks(handle, inode, NULL,
                                 EXT4_C2B(sbi, *partial_cluster),
@@ -2664,7 +2686,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
        struct super_block *sb = inode->i_sb;
        int depth = ext_depth(inode);
        struct ext4_ext_path *path = NULL;
-       ext4_fsblk_t partial_cluster = 0;
+       long long partial_cluster = 0;
        handle_t *handle;
        int i = 0, err = 0;
 
@@ -2676,7 +2698,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
                return PTR_ERR(handle);
 
 again:
-       trace_ext4_ext_remove_space(inode, start, depth);
+       trace_ext4_ext_remove_space(inode, start, end, depth);
 
        /*
         * Check if we are removing extents inside the extent tree. If that
@@ -2844,17 +2866,14 @@ again:
                }
        }
 
-       trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
-                       path->p_hdr->eh_entries);
+       trace_ext4_ext_remove_space_done(inode, start, end, depth,
+                       partial_cluster, path->p_hdr->eh_entries);
 
        /* If we still have something in the partial cluster and we have removed
         * even the first extent, then we should free the blocks in the partial
         * cluster as well. */
-       if (partial_cluster && path->p_hdr->eh_entries == 0) {
-               int flags = EXT4_FREE_BLOCKS_FORGET;
-
-               if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-                       flags |= EXT4_FREE_BLOCKS_METADATA;
+       if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
+               int flags = get_default_free_blocks_flags(inode);
 
                ext4_free_blocks(handle, inode, NULL,
                                 EXT4_C2B(EXT4_SB(sb), partial_cluster),
@@ -4363,7 +4382,7 @@ out2:
        }
 
 out3:
-       trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated);
+       trace_ext4_ext_map_blocks_exit(inode, flags, map, err ? err : allocated);
 
        return err ? err : allocated;
 }
@@ -4446,7 +4465,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                return -EOPNOTSUPP;
 
        if (mode & FALLOC_FL_PUNCH_HOLE)
-               return ext4_punch_hole(file, offset, len);
+               return ext4_punch_hole(inode, offset, len);
 
        ret = ext4_convert_inline_data(inode);
        if (ret)
@@ -4548,10 +4567,9 @@ retry:
  * function, to convert the fallocated extents after IO is completed.
  * Returns 0 on success.
  */
-int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
-                                   ssize_t len)
+int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
+                                  loff_t offset, ssize_t len)
 {
-       handle_t *handle;
        unsigned int max_blocks;
        int ret = 0;
        int ret2 = 0;
@@ -4566,16 +4584,32 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
        max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
                      map.m_lblk);
        /*
-        * credits to insert 1 extent into extent tree
+        * This is somewhat ugly but the idea is clear: when a transaction is
+        * reserved, everything goes into it. Otherwise we start several
+        * smaller transactions, converting each extent separately.
         */
-       credits = ext4_chunk_trans_blocks(inode, max_blocks);
+       if (handle) {
+               handle = ext4_journal_start_reserved(handle,
+                                                    EXT4_HT_EXT_CONVERT);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+               credits = 0;
+       } else {
+               /*
+                * credits to insert 1 extent into extent tree
+                */
+               credits = ext4_chunk_trans_blocks(inode, max_blocks);
+       }
        while (ret >= 0 && ret < max_blocks) {
                map.m_lblk += ret;
                map.m_len = (max_blocks -= ret);
-               handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
-               if (IS_ERR(handle)) {
-                       ret = PTR_ERR(handle);
-                       break;
+               if (credits) {
+                       handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+                                                   credits);
+                       if (IS_ERR(handle)) {
+                               ret = PTR_ERR(handle);
+                               break;
+                       }
                }
                ret = ext4_map_blocks(handle, inode, &map,
                                      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
@@ -4586,10 +4620,13 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
                                     inode->i_ino, map.m_lblk,
                                     map.m_len, ret);
                ext4_mark_inode_dirty(handle, inode);
-               ret2 = ext4_journal_stop(handle);
-               if (ret <= 0 || ret2 )
+               if (credits)
+                       ret2 = ext4_journal_stop(handle);
+               if (ret <= 0 || ret2)
                        break;
        }
+       if (!credits)
+               ret2 = ext4_journal_stop(handle);
        return ret > 0 ? ret2 : ret;
 }
 
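
The loop above treats credits == 0 as "the caller owns the transaction": with a reserved handle the whole conversion runs inside one transaction, otherwise a small handle is started and stopped around each extent. A standalone sketch of that control flow, with made-up txn_* names standing in for the journalling calls:

#include <stdio.h>

/* Made-up stand-ins for the journalling calls; illustration only. */
struct txn { int id; };

static void txn_start(struct txn *t) { printf("start txn %d\n", t->id); }
static void txn_stop(struct txn *t)  { printf("stop txn %d\n", t->id); }

static void convert_extents(struct txn *reserved, int nr_extents)
{
	struct txn local = { .id = 1 };
	int credits = reserved ? 0 : 8;		/* 8: made-up per-extent cost        */
	int i;

	for (i = 0; i < nr_extents; i++) {
		struct txn *t = reserved ? reserved : &local;

		if (credits)
			txn_start(t);		/* one small txn per extent          */
		printf("convert extent %d in txn %d\n", i, t->id);
		if (credits)
			txn_stop(t);
	}
	if (!credits)
		txn_stop(reserved);		/* reserved txn stopped exactly once */
}

int main(void)
{
	struct txn reserved = { .id = 99 };

	convert_extents(&reserved, 2);		/* reserved-handle mode              */
	convert_extents(NULL, 2);		/* per-extent transaction mode       */
	return 0;
}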
@@ -4659,7 +4696,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
                error = ext4_get_inode_loc(inode, &iloc);
                if (error)
                        return error;
-               physical = iloc.bh->b_blocknr << blockbits;
+               physical = (__u64)iloc.bh->b_blocknr << blockbits;
                offset = EXT4_GOOD_OLD_INODE_SIZE +
                                EXT4_I(inode)->i_extra_isize;
                physical += offset;
@@ -4667,7 +4704,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
                flags |= FIEMAP_EXTENT_DATA_INLINE;
                brelse(iloc.bh);
        } else { /* external block */
-               physical = EXT4_I(inode)->i_file_acl << blockbits;
+               physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
                length = inode->i_sb->s_blocksize;
        }
 
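
The two (__u64) casts above exist because the shift would otherwise be evaluated in the (32-bit) width of the block number and wrap around on large filesystems. A standalone example of the failure mode, with arbitrary values:

#include <stdio.h>

int main(void)
{
	unsigned int blocknr = 3000000;	/* a block number beyond 4 GiB / 4 KiB  */
	unsigned int blockbits = 12;	/* 4 KiB blocks                         */

	/* Evaluated in 32 bits: wraps around and yields a bogus byte offset.   */
	unsigned long long wrong = blocknr << blockbits;
	/* Widened first, as the patch does with the (__u64) cast: correct.     */
	unsigned long long right = (unsigned long long)blocknr << blockbits;

	printf("wrong = %llu\nright = %llu\n", wrong, right);
	return 0;
}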
index e6941e622d310eb1ab47793b7c2e984609cebf0b..ee018d5f397e6ba88ec0bf21196e1ad0612d4e53 100644 (file)
@@ -10,6 +10,7 @@
  * Ext4 extents status tree core functions.
  */
 #include <linux/rbtree.h>
+#include <linux/list_sort.h>
 #include "ext4.h"
 #include "extents_status.h"
 #include "ext4_extents.h"
@@ -291,7 +292,6 @@ out:
 
        read_unlock(&EXT4_I(inode)->i_es_lock);
 
-       ext4_es_lru_add(inode);
        trace_ext4_es_find_delayed_extent_range_exit(inode, es);
 }
 
@@ -672,7 +672,6 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
 error:
        write_unlock(&EXT4_I(inode)->i_es_lock);
 
-       ext4_es_lru_add(inode);
        ext4_es_print_tree(inode);
 
        return err;
@@ -734,7 +733,6 @@ out:
 
        read_unlock(&EXT4_I(inode)->i_es_lock);
 
-       ext4_es_lru_add(inode);
        trace_ext4_es_lookup_extent_exit(inode, es, found);
        return found;
 }
@@ -878,12 +876,28 @@ int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
                                     EXTENT_STATUS_WRITTEN);
 }
 
+static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
+                                    struct list_head *b)
+{
+       struct ext4_inode_info *eia, *eib;
+       eia = list_entry(a, struct ext4_inode_info, i_es_lru);
+       eib = list_entry(b, struct ext4_inode_info, i_es_lru);
+
+       if (eia->i_touch_when == eib->i_touch_when)
+               return 0;
+       if (time_after(eia->i_touch_when, eib->i_touch_when))
+               return 1;
+       else
+               return -1;
+}
+
 static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct ext4_sb_info *sbi = container_of(shrink,
                                        struct ext4_sb_info, s_es_shrinker);
        struct ext4_inode_info *ei;
-       struct list_head *cur, *tmp, scanned;
+       struct list_head *cur, *tmp;
+       LIST_HEAD(skipped);
        int nr_to_scan = sc->nr_to_scan;
        int ret, nr_shrunk = 0;
 
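
The comparator above follows the list_sort() contract (negative, zero, or positive) and uses time_after() so jiffies wraparound is handled. A standalone sketch of the same wrap-safe comparison, assuming unsigned tick counters like jiffies:

#include <stdio.h>

/* Wrap-safe "a is after b" test; same idea as the kernel's time_after(). */
static int ticks_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

static int touch_time_cmp(unsigned long a, unsigned long b)
{
	if (a == b)
		return 0;
	return ticks_after(a, b) ? 1 : -1;	/* newer timestamps sort later */
}

int main(void)
{
	unsigned long before_wrap = (unsigned long)-16;	/* ULONG_MAX - 15       */
	unsigned long after_wrap  = 16;			/* counter has wrapped  */

	printf("cmp(after, before) = %d\n", touch_time_cmp(after_wrap, before_wrap)); /*  1 */
	printf("cmp(before, after) = %d\n", touch_time_cmp(before_wrap, after_wrap)); /* -1 */
	return 0;
}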
@@ -893,23 +907,41 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
        if (!nr_to_scan)
                return ret;
 
-       INIT_LIST_HEAD(&scanned);
-
        spin_lock(&sbi->s_es_lru_lock);
+
+       /*
+        * If the inode that is at the head of LRU list is newer than
+        * If the inode at the head of the LRU list is newer than the
+        * last_sorted time, we need to re-sort the list.
+       ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, i_es_lru);
+       if (sbi->s_es_last_sorted < ei->i_touch_when) {
+               list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
+               sbi->s_es_last_sorted = jiffies;
+       }
+
        list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
-               list_move_tail(cur, &scanned);
+               /*
+                * If we have already reclaimed all extents from the extent
+                * status tree, just stop the loop immediately.
+                */
+               if (percpu_counter_read_positive(&sbi->s_extent_cache_cnt) == 0)
+                       break;
 
                ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
 
-               read_lock(&ei->i_es_lock);
-               if (ei->i_es_lru_nr == 0) {
-                       read_unlock(&ei->i_es_lock);
+               /* Skip the inode that is newer than the last_sorted time */
+               if (sbi->s_es_last_sorted < ei->i_touch_when) {
+                       list_move_tail(cur, &skipped);
                        continue;
                }
-               read_unlock(&ei->i_es_lock);
+
+               if (ei->i_es_lru_nr == 0)
+                       continue;
 
                write_lock(&ei->i_es_lock);
                ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
+               if (ei->i_es_lru_nr == 0)
+                       list_del_init(&ei->i_es_lru);
                write_unlock(&ei->i_es_lock);
 
                nr_shrunk += ret;
@@ -917,7 +949,9 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
                if (nr_to_scan == 0)
                        break;
        }
-       list_splice_tail(&scanned, &sbi->s_es_lru);
+
+       /* Move the newer inodes into the tail of the LRU list. */
+       list_splice_tail(&skipped, &sbi->s_es_lru);
        spin_unlock(&sbi->s_es_lru_lock);
 
        ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
@@ -925,21 +959,19 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
        return ret;
 }
 
-void ext4_es_register_shrinker(struct super_block *sb)
+void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
 {
-       struct ext4_sb_info *sbi;
-
-       sbi = EXT4_SB(sb);
        INIT_LIST_HEAD(&sbi->s_es_lru);
        spin_lock_init(&sbi->s_es_lru_lock);
+       sbi->s_es_last_sorted = 0;
        sbi->s_es_shrinker.shrink = ext4_es_shrink;
        sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&sbi->s_es_shrinker);
 }
 
-void ext4_es_unregister_shrinker(struct super_block *sb)
+void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
 {
-       unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker);
+       unregister_shrinker(&sbi->s_es_shrinker);
 }
 
 void ext4_es_lru_add(struct inode *inode)
@@ -947,11 +979,14 @@ void ext4_es_lru_add(struct inode *inode)
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 
+       ei->i_touch_when = jiffies;
+
+       if (!list_empty(&ei->i_es_lru))
+               return;
+
        spin_lock(&sbi->s_es_lru_lock);
        if (list_empty(&ei->i_es_lru))
                list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
-       else
-               list_move_tail(&ei->i_es_lru, &sbi->s_es_lru);
        spin_unlock(&sbi->s_es_lru_lock);
 }
 
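
The reworked ext4_es_lru_add() above turns repeated calls on an already-queued inode into a lock-free timestamp update: the list lock is only taken when the inode is not yet on the LRU, and list_empty() is rechecked under the lock. A generic standalone sketch of that check, lock, recheck pattern (pthreads used purely for illustration; the kernel version relies on the spinlock plus the benign unlocked list_empty() test):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct item {
	time_t touch_when;		/* stands in for i_touch_when             */
	bool   on_list;			/* stands in for !list_empty(&i_es_lru)   */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void lru_touch(struct item *it)
{
	it->touch_when = time(NULL);	/* cheap update, no lock                  */

	if (it->on_list)		/* fast path: already queued, done        */
		return;

	pthread_mutex_lock(&list_lock);
	if (!it->on_list)		/* recheck under the lock before adding   */
		it->on_list = true;	/* the patch does list_add_tail() here    */
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct item it = { 0, false };

	lru_touch(&it);			/* slow path: takes the lock once         */
	lru_touch(&it);			/* fast path: lock avoided                */
	printf("on_list=%d\n", it.on_list);
	return 0;
}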
index f740eb03b7079b755d3033225a08546ab450c117..e936730cc5b029c45153d47a026028aab8a23283 100644 (file)
@@ -39,6 +39,7 @@
                                 EXTENT_STATUS_DELAYED | \
                                 EXTENT_STATUS_HOLE)
 
+struct ext4_sb_info;
 struct ext4_extent;
 
 struct extent_status {
@@ -119,8 +120,8 @@ static inline void ext4_es_store_status(struct extent_status *es,
        es->es_pblk = block;
 }
 
-extern void ext4_es_register_shrinker(struct super_block *sb);
-extern void ext4_es_unregister_shrinker(struct super_block *sb);
+extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi);
+extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
 extern void ext4_es_lru_add(struct inode *inode);
 extern void ext4_es_lru_del(struct inode *inode);
 
index b1b4d51b5d86b4e54c179ddce5f5b574238b3629..b19f0a457f329c5fb0f6974f7afff8dc79f1e31c 100644 (file)
@@ -312,7 +312,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
-       endoff = (map->m_lblk + map->m_len) << blkbits;
+       endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
 
        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;
@@ -457,7 +457,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        if (last != start)
-                               dataoff = last << blkbits;
+                               dataoff = (loff_t)last << blkbits;
                        break;
                }
 
@@ -468,7 +468,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
-                               dataoff = last << blkbits;
+                               dataoff = (loff_t)last << blkbits;
                        break;
                }
 
@@ -486,7 +486,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
                }
 
                last++;
-               dataoff = last << blkbits;
+               dataoff = (loff_t)last << blkbits;
        } while (last <= end);
 
        mutex_unlock(&inode->i_mutex);
@@ -540,7 +540,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        last += ret;
-                       holeoff = last << blkbits;
+                       holeoff = (loff_t)last << blkbits;
                        continue;
                }
 
@@ -551,7 +551,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
-                       holeoff = last << blkbits;
+                       holeoff = (loff_t)last << blkbits;
                        continue;
                }
 
@@ -566,7 +566,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
                                                              &map, &holeoff);
                        if (!unwritten) {
                                last += ret;
-                               holeoff = last << blkbits;
+                               holeoff = (loff_t)last << blkbits;
                                continue;
                        }
                }
index e0ba8a408def07583b9cd04a57259e3c56183057..a8bc47f75fa0946582ce5562ebdea334cead1ac6 100644 (file)
@@ -73,32 +73,6 @@ static int ext4_sync_parent(struct inode *inode)
        return ret;
 }
 
-/**
- * __sync_file - generic_file_fsync without the locking and filemap_write
- * @inode:     inode to sync
- * @datasync:  only sync essential metadata if true
- *
- * This is just generic_file_fsync without the locking.  This is needed for
- * nojournal mode to make sure this inodes data/metadata makes it to disk
- * properly.  The i_mutex should be held already.
- */
-static int __sync_inode(struct inode *inode, int datasync)
-{
-       int err;
-       int ret;
-
-       ret = sync_mapping_buffers(inode->i_mapping);
-       if (!(inode->i_state & I_DIRTY))
-               return ret;
-       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
-               return ret;
-
-       err = sync_inode_metadata(inode, 1);
-       if (ret == 0)
-               ret = err;
-       return ret;
-}
-
 /*
  * akpm: A new design for ext4_sync_file().
  *
@@ -116,7 +90,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        struct inode *inode = file->f_mapping->host;
        struct ext4_inode_info *ei = EXT4_I(inode);
        journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
-       int ret, err;
+       int ret = 0, err;
        tid_t commit_tid;
        bool needs_barrier = false;
 
@@ -124,25 +98,24 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        trace_ext4_sync_file_enter(file, datasync);
 
-       ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-       if (ret)
-               return ret;
-       mutex_lock(&inode->i_mutex);
-
-       if (inode->i_sb->s_flags & MS_RDONLY)
-               goto out;
-
-       ret = ext4_flush_unwritten_io(inode);
-       if (ret < 0)
+       if (inode->i_sb->s_flags & MS_RDONLY) {
+               /* Make sure that we read updated s_mount_flags value */
+               smp_rmb();
+               if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+                       ret = -EROFS;
                goto out;
+       }
 
        if (!journal) {
-               ret = __sync_inode(inode, datasync);
+               ret = generic_file_fsync(file, start, end, datasync);
                if (!ret && !hlist_empty(&inode->i_dentry))
                        ret = ext4_sync_parent(inode);
                goto out;
        }
 
+       ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       if (ret)
+               return ret;
        /*
         * data=writeback,ordered:
         *  The caller's filemap_fdatawrite()/wait will sync the data.
@@ -172,8 +145,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                if (!ret)
                        ret = err;
        }
- out:
-       mutex_unlock(&inode->i_mutex);
+out:
        trace_ext4_sync_file_exit(inode, ret);
        return ret;
 }
index 00a818d67b54930c74dad44c6f73dcb3f01e6a19..f03598c6ffd3a48282193a13ff30ab6e412e471b 100644 (file)
@@ -747,7 +747,8 @@ repeat_in_this_group:
                if (!handle) {
                        BUG_ON(nblocks <= 0);
                        handle = __ext4_journal_start_sb(dir->i_sb, line_no,
-                                                        handle_type, nblocks);
+                                                        handle_type, nblocks,
+                                                        0);
                        if (IS_ERR(handle)) {
                                err = PTR_ERR(handle);
                                ext4_std_error(sb, err);
index b8d5d351e24f64b1a5ee02bd90be59f5b19ce290..87b30cd357e7f1962b55bf9d452f839376187302 100644 (file)
@@ -624,7 +624,7 @@ cleanup:
                partial--;
        }
 out:
-       trace_ext4_ind_map_blocks_exit(inode, map, err);
+       trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
        return err;
 }
 
@@ -675,11 +675,6 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
 
 retry:
        if (rw == READ && ext4_should_dioread_nolock(inode)) {
-               if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) {
-                       mutex_lock(&inode->i_mutex);
-                       ext4_flush_unwritten_io(inode);
-                       mutex_unlock(&inode->i_mutex);
-               }
                /*
                 * Nolock dioread optimization may be dynamically disabled
                 * via ext4_inode_block_unlocked_dio(). Check inode's state
@@ -779,27 +774,18 @@ int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
        return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
 }
 
-int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+/*
+ * Calculate the number of indirect blocks touched by mapping @nrblocks
+ * logically contiguous blocks.
+ */
+int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
 {
-       int indirects;
-
-       /* if nrblocks are contiguous */
-       if (chunk) {
-               /*
-                * With N contiguous data blocks, we need at most
-                * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
-                * 2 dindirect blocks, and 1 tindirect block
-                */
-               return DIV_ROUND_UP(nrblocks,
-                                   EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
-       }
        /*
-        * if nrblocks are not contiguous, worse case, each block touch
-        * a indirect block, and each indirect block touch a double indirect
-        * block, plus a triple indirect block
+        * With N contiguous data blocks, we need at most
+        * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+        * 2 dindirect blocks, and 1 tindirect block
         */
-       indirects = nrblocks * 2 + 1;
-       return indirects;
+       return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
 }
 
 /*
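
A worked example of the simplified estimate above, assuming 4 KiB blocks so that EXT4_ADDR_PER_BLOCK comes to 1024 (4096 bytes / 4-byte block pointers); the numbers are illustrative only:

#include <stdio.h>

#define ADDR_PER_BLOCK	1024	/* assumed: 4096-byte block / 4-byte pointer */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors the new ext4_ind_trans_blocks(): indirect blocks for the data,
 * plus a fixed 4-block allowance (extra indirect, 2 dindirect, 1 tindirect). */
static int ind_trans_blocks(int nrblocks)
{
	return DIV_ROUND_UP(nrblocks, ADDR_PER_BLOCK) + 4;
}

int main(void)
{
	printf("   1 block  -> %d metadata blocks\n", ind_trans_blocks(1));	/* 5 */
	printf("2048 blocks -> %d metadata blocks\n", ind_trans_blocks(2048));	/* 6 */
	return 0;
}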
@@ -940,11 +926,13 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
                             __le32 *last)
 {
        __le32 *p;
-       int     flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
+       int     flags = EXT4_FREE_BLOCKS_VALIDATED;
        int     err;
 
        if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-               flags |= EXT4_FREE_BLOCKS_METADATA;
+               flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
+       else if (ext4_should_journal_data(inode))
+               flags |= EXT4_FREE_BLOCKS_FORGET;
 
        if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
                                   count)) {
index 3e2bf873e8a8b42b1564c44505e0ec8cba1971e6..d9ecbf1113a75798f4d2a5903fd2fc522ce00dba 100644 (file)
@@ -72,7 +72,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
                entry = (struct ext4_xattr_entry *)
                        ((void *)raw_inode + EXT4_I(inode)->i_inline_off);
 
-               free += le32_to_cpu(entry->e_value_size);
+               free += EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size));
                goto out;
        }
 
@@ -1404,16 +1404,15 @@ out:
  * offset as if '.' and '..' really take place.
  *
  */
-int ext4_read_inline_dir(struct file *filp,
-                        void *dirent, filldir_t filldir,
+int ext4_read_inline_dir(struct file *file,
+                        struct dir_context *ctx,
                         int *has_inline_data)
 {
-       int error = 0;
        unsigned int offset, parent_ino;
-       int i, stored;
+       int i;
        struct ext4_dir_entry_2 *de;
        struct super_block *sb;
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        int ret, inline_size = 0;
        struct ext4_iloc iloc;
        void *dir_buf = NULL;
@@ -1444,9 +1443,8 @@ int ext4_read_inline_dir(struct file *filp,
                goto out;
 
        sb = inode->i_sb;
-       stored = 0;
        parent_ino = le32_to_cpu(((struct ext4_dir_entry_2 *)dir_buf)->inode);
-       offset = filp->f_pos;
+       offset = ctx->pos;
 
        /*
         * dotdot_offset and dotdot_size is the real offset and
@@ -1460,104 +1458,74 @@ int ext4_read_inline_dir(struct file *filp,
        extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
        extra_size = extra_offset + inline_size;
 
-       while (!error && !stored && filp->f_pos < extra_size) {
-revalidate:
-               /*
-                * If the version has changed since the last call to
-                * readdir(2), then we might be pointing to an invalid
-                * dirent right now.  Scan from the start of the inline
-                * dir to make sure.
-                */
-               if (filp->f_version != inode->i_version) {
-                       for (i = 0; i < extra_size && i < offset;) {
-                               /*
-                                * "." is with offset 0 and
-                                * ".." is dotdot_offset.
-                                */
-                               if (!i) {
-                                       i = dotdot_offset;
-                                       continue;
-                               } else if (i == dotdot_offset) {
-                                       i = dotdot_size;
-                                       continue;
-                               }
-                               /* for other entry, the real offset in
-                                * the buf has to be tuned accordingly.
-                                */
-                               de = (struct ext4_dir_entry_2 *)
-                                       (dir_buf + i - extra_offset);
-                               /* It's too expensive to do a full
-                                * dirent test each time round this
-                                * loop, but we do have to test at
-                                * least that it is non-zero.  A
-                                * failure will be detected in the
-                                * dirent test below. */
-                               if (ext4_rec_len_from_disk(de->rec_len,
-                                       extra_size) < EXT4_DIR_REC_LEN(1))
-                                       break;
-                               i += ext4_rec_len_from_disk(de->rec_len,
-                                                           extra_size);
-                       }
-                       offset = i;
-                       filp->f_pos = offset;
-                       filp->f_version = inode->i_version;
-               }
-
-               while (!error && filp->f_pos < extra_size) {
-                       if (filp->f_pos == 0) {
-                               error = filldir(dirent, ".", 1, 0, inode->i_ino,
-                                               DT_DIR);
-                               if (error)
-                                       break;
-                               stored++;
-                               filp->f_pos = dotdot_offset;
+       /*
+        * If the version has changed since the last call to
+        * readdir(2), then we might be pointing to an invalid
+        * dirent right now.  Scan from the start of the inline
+        * dir to make sure.
+        */
+       if (file->f_version != inode->i_version) {
+               for (i = 0; i < extra_size && i < offset;) {
+                       /*
+                        * "." is with offset 0 and
+                        * ".." is dotdot_offset.
+                        */
+                       if (!i) {
+                               i = dotdot_offset;
+                               continue;
+                       } else if (i == dotdot_offset) {
+                               i = dotdot_size;
                                continue;
                        }
+                       /* for other entry, the real offset in
+                        * the buf has to be tuned accordingly.
+                        */
+                       de = (struct ext4_dir_entry_2 *)
+                               (dir_buf + i - extra_offset);
+                       /* It's too expensive to do a full
+                        * dirent test each time round this
+                        * loop, but we do have to test at
+                        * least that it is non-zero.  A
+                        * failure will be detected in the
+                        * dirent test below. */
+                       if (ext4_rec_len_from_disk(de->rec_len, extra_size)
+                               < EXT4_DIR_REC_LEN(1))
+                               break;
+                       i += ext4_rec_len_from_disk(de->rec_len,
+                                                   extra_size);
+               }
+               offset = i;
+               ctx->pos = offset;
+               file->f_version = inode->i_version;
+       }
 
-                       if (filp->f_pos == dotdot_offset) {
-                               error = filldir(dirent, "..", 2,
-                                               dotdot_offset,
-                                               parent_ino, DT_DIR);
-                               if (error)
-                                       break;
-                               stored++;
+       while (ctx->pos < extra_size) {
+               if (ctx->pos == 0) {
+                       if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR))
+                               goto out;
+                       ctx->pos = dotdot_offset;
+                       continue;
+               }
 
-                               filp->f_pos = dotdot_size;
-                               continue;
-                       }
+               if (ctx->pos == dotdot_offset) {
+                       if (!dir_emit(ctx, "..", 2, parent_ino, DT_DIR))
+                               goto out;
+                       ctx->pos = dotdot_size;
+                       continue;
+               }
 
-                       de = (struct ext4_dir_entry_2 *)
-                               (dir_buf + filp->f_pos - extra_offset);
-                       if (ext4_check_dir_entry(inode, filp, de,
-                                                iloc.bh, dir_buf,
-                                                extra_size, filp->f_pos)) {
-                               ret = stored;
+               de = (struct ext4_dir_entry_2 *)
+                       (dir_buf + ctx->pos - extra_offset);
+               if (ext4_check_dir_entry(inode, file, de, iloc.bh, dir_buf,
+                                        extra_size, ctx->pos))
+                       goto out;
+               if (le32_to_cpu(de->inode)) {
+                       if (!dir_emit(ctx, de->name, de->name_len,
+                                     le32_to_cpu(de->inode),
+                                     get_dtype(sb, de->file_type)))
                                goto out;
-                       }
-                       if (le32_to_cpu(de->inode)) {
-                               /* We might block in the next section
-                                * if the data destination is
-                                * currently swapped out.  So, use a
-                                * version stamp to detect whether or
-                                * not the directory has been modified
-                                * during the copy operation.
-                                */
-                               u64 version = filp->f_version;
-
-                               error = filldir(dirent, de->name,
-                                               de->name_len,
-                                               filp->f_pos,
-                                               le32_to_cpu(de->inode),
-                                               get_dtype(sb, de->file_type));
-                               if (error)
-                                       break;
-                               if (version != filp->f_version)
-                                       goto revalidate;
-                               stored++;
-                       }
-                       filp->f_pos += ext4_rec_len_from_disk(de->rec_len,
-                                                             extra_size);
                }
+               ctx->pos += ext4_rec_len_from_disk(de->rec_len, extra_size);
        }
 out:
        kfree(dir_buf);
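
The rewrite above drops the filldir()/f_pos/stored bookkeeping in favour of the dir_context API introduced in 3.11: dir_emit() returns false when the user buffer is full, and the iterator simply returns with ctx->pos left at the next entry. A minimal kernel-style sketch of that shape (the demo_* name is hypothetical; this is not the ext4 implementation):

static int demo_iterate(struct file *file, struct dir_context *ctx)
{
	/* Emit "." first; if the buffer is full, stop and keep ctx->pos. */
	if (ctx->pos == 0) {
		if (!dir_emit(ctx, ".", 1, file_inode(file)->i_ino, DT_DIR))
			return 0;
		ctx->pos = 1;
	}
	/* ... walk the real entries, calling dir_emit() and advancing ctx->pos,
	 *     returning 0 as soon as dir_emit() reports the buffer is full ... */
	return 0;
}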
@@ -1842,7 +1810,7 @@ int ext4_inline_data_fiemap(struct inode *inode,
        if (error)
                goto out;
 
-       physical = iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
+       physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
        physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
        physical += offsetof(struct ext4_inode, i_block);
        length = i_size_read(inode);
index d6382b89ecbde3077720ebc6a9bb56254883e2d7..0188e65e1f589efbf1be3726ad1cc1a2236a0739 100644 (file)
@@ -132,12 +132,12 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                                   new_size);
 }
 
-static void ext4_invalidatepage(struct page *page, unsigned long offset);
+static void ext4_invalidatepage(struct page *page, unsigned int offset,
+                               unsigned int length);
 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
-static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
-               struct inode *inode, struct page *page, loff_t from,
-               loff_t length, int flags);
+static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
+                                 int pextents);
 
 /*
  * Test whether an inode is a fast symlink.
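
The new prototype above follows the 3.11 ->invalidatepage() interface, which now receives an offset and a length so that only part of a page can be invalidated. A minimal kernel-style callback of that shape (illustration only, not the ext4 implementation):

static void demo_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	/* Only buffers inside [offset, offset + length) are affected now;
	 * block_invalidatepage() takes the same pair of arguments. */
	block_invalidatepage(page, offset, length);
}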
@@ -215,7 +215,8 @@ void ext4_evict_inode(struct inode *inode)
                        filemap_write_and_wait(&inode->i_data);
                }
                truncate_inode_pages(&inode->i_data, 0);
-               ext4_ioend_shutdown(inode);
+
+               WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
                goto no_delete;
        }
 
@@ -225,8 +226,8 @@ void ext4_evict_inode(struct inode *inode)
        if (ext4_should_order_data(inode))
                ext4_begin_ordered_truncate(inode, 0);
        truncate_inode_pages(&inode->i_data, 0);
-       ext4_ioend_shutdown(inode);
 
+       WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
        if (is_bad_inode(inode))
                goto no_delete;
 
@@ -423,66 +424,6 @@ static int __check_block_validity(struct inode *inode, const char *func,
 #define check_block_validity(inode, map)       \
        __check_block_validity((inode), __func__, __LINE__, (map))
 
-/*
- * Return the number of contiguous dirty pages in a given inode
- * starting at page frame idx.
- */
-static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
-                                   unsigned int max_pages)
-{
-       struct address_space *mapping = inode->i_mapping;
-       pgoff_t index;
-       struct pagevec pvec;
-       pgoff_t num = 0;
-       int i, nr_pages, done = 0;
-
-       if (max_pages == 0)
-               return 0;
-       pagevec_init(&pvec, 0);
-       while (!done) {
-               index = idx;
-               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                                             PAGECACHE_TAG_DIRTY,
-                                             (pgoff_t)PAGEVEC_SIZE);
-               if (nr_pages == 0)
-                       break;
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-                       struct buffer_head *bh, *head;
-
-                       lock_page(page);
-                       if (unlikely(page->mapping != mapping) ||
-                           !PageDirty(page) ||
-                           PageWriteback(page) ||
-                           page->index != idx) {
-                               done = 1;
-                               unlock_page(page);
-                               break;
-                       }
-                       if (page_has_buffers(page)) {
-                               bh = head = page_buffers(page);
-                               do {
-                                       if (!buffer_delay(bh) &&
-                                           !buffer_unwritten(bh))
-                                               done = 1;
-                                       bh = bh->b_this_page;
-                               } while (!done && (bh != head));
-                       }
-                       unlock_page(page);
-                       if (done)
-                               break;
-                       idx++;
-                       num++;
-                       if (num >= max_pages) {
-                               done = 1;
-                               break;
-                       }
-               }
-               pagevec_release(&pvec);
-       }
-       return num;
-}
-
 #ifdef ES_AGGRESSIVE_TEST
 static void ext4_map_blocks_es_recheck(handle_t *handle,
                                       struct inode *inode,
@@ -573,6 +514,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                  "logical block %lu\n", inode->i_ino, flags, map->m_len,
                  (unsigned long) map->m_lblk);
 
+       ext4_es_lru_add(inode);
+
        /* Lookup extent status tree firstly */
        if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
                if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
@@ -1118,10 +1061,13 @@ static int ext4_write_end(struct file *file,
                }
        }
 
-       if (ext4_has_inline_data(inode))
-               copied = ext4_write_inline_data_end(inode, pos, len,
-                                                   copied, page);
-       else
+       if (ext4_has_inline_data(inode)) {
+               ret = ext4_write_inline_data_end(inode, pos, len,
+                                                copied, page);
+               if (ret < 0)
+                       goto errout;
+               copied = ret;
+       } else
                copied = block_write_end(file, mapping, pos,
                                         len, copied, page, fsdata);
 
@@ -1157,8 +1103,6 @@ static int ext4_write_end(struct file *file,
        if (i_size_changed)
                ext4_mark_inode_dirty(handle, inode);
 
-       if (copied < 0)
-               ret = copied;
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /* if we have allocated more blocks and copied
                 * less. We will have blocks allocated outside
@@ -1415,21 +1359,28 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 }
 
 static void ext4_da_page_release_reservation(struct page *page,
-                                            unsigned long offset)
+                                            unsigned int offset,
+                                            unsigned int length)
 {
        int to_release = 0;
        struct buffer_head *head, *bh;
        unsigned int curr_off = 0;
        struct inode *inode = page->mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       unsigned int stop = offset + length;
        int num_clusters;
        ext4_fsblk_t lblk;
 
+       BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;
 
+               if (next_off > stop)
+                       break;
+
                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
                        clear_buffer_delay(bh);
@@ -1460,140 +1411,43 @@ static void ext4_da_page_release_reservation(struct page *page,
  * Delayed allocation stuff
  */
 
-/*
- * mpage_da_submit_io - walks through extent of pages and try to write
- * them with writepage() call back
- *
- * @mpd->inode: inode
- * @mpd->first_page: first page of the extent
- * @mpd->next_page: page after the last page of the extent
- *
- * By the time mpage_da_submit_io() is called we expect all blocks
- * to be allocated. this may be wrong if allocation failed.
- *
- * As pages are already locked by write_cache_pages(), we can't use it
- */
-static int mpage_da_submit_io(struct mpage_da_data *mpd,
-                             struct ext4_map_blocks *map)
-{
-       struct pagevec pvec;
-       unsigned long index, end;
-       int ret = 0, err, nr_pages, i;
-       struct inode *inode = mpd->inode;
-       struct address_space *mapping = inode->i_mapping;
-       loff_t size = i_size_read(inode);
-       unsigned int len, block_start;
-       struct buffer_head *bh, *page_bufs = NULL;
-       sector_t pblock = 0, cur_logical = 0;
-       struct ext4_io_submit io_submit;
+struct mpage_da_data {
+       struct inode *inode;
+       struct writeback_control *wbc;
 
-       BUG_ON(mpd->next_page <= mpd->first_page);
-       memset(&io_submit, 0, sizeof(io_submit));
+       pgoff_t first_page;     /* The first page to write */
+       pgoff_t next_page;      /* Current page to examine */
+       pgoff_t last_page;      /* Last page to examine */
        /*
-        * We need to start from the first_page to the next_page - 1
-        * to make sure we also write the mapped dirty buffer_heads.
-        * If we look at mpd->b_blocknr we would only be looking
-        * at the currently mapped buffer_heads.
+        * Extent to map - this can start after first_page because that page
+        * may already be fully mapped. We somewhat abuse m_flags to store
+        * whether the extent is delalloc or unwritten.
         */
-       index = mpd->first_page;
-       end = mpd->next_page - 1;
-
-       pagevec_init(&pvec, 0);
-       while (index <= end) {
-               nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
-               if (nr_pages == 0)
-                       break;
-               for (i = 0; i < nr_pages; i++) {
-                       int skip_page = 0;
-                       struct page *page = pvec.pages[i];
-
-                       index = page->index;
-                       if (index > end)
-                               break;
-
-                       if (index == size >> PAGE_CACHE_SHIFT)
-                               len = size & ~PAGE_CACHE_MASK;
-                       else
-                               len = PAGE_CACHE_SIZE;
-                       if (map) {
-                               cur_logical = index << (PAGE_CACHE_SHIFT -
-                                                       inode->i_blkbits);
-                               pblock = map->m_pblk + (cur_logical -
-                                                       map->m_lblk);
-                       }
-                       index++;
-
-                       BUG_ON(!PageLocked(page));
-                       BUG_ON(PageWriteback(page));
-
-                       bh = page_bufs = page_buffers(page);
-                       block_start = 0;
-                       do {
-                               if (map && (cur_logical >= map->m_lblk) &&
-                                   (cur_logical <= (map->m_lblk +
-                                                    (map->m_len - 1)))) {
-                                       if (buffer_delay(bh)) {
-                                               clear_buffer_delay(bh);
-                                               bh->b_blocknr = pblock;
-                                       }
-                                       if (buffer_unwritten(bh) ||
-                                           buffer_mapped(bh))
-                                               BUG_ON(bh->b_blocknr != pblock);
-                                       if (map->m_flags & EXT4_MAP_UNINIT)
-                                               set_buffer_uninit(bh);
-                                       clear_buffer_unwritten(bh);
-                               }
-
-                               /*
-                                * skip page if block allocation undone and
-                                * block is dirty
-                                */
-                               if (ext4_bh_delay_or_unwritten(NULL, bh))
-                                       skip_page = 1;
-                               bh = bh->b_this_page;
-                               block_start += bh->b_size;
-                               cur_logical++;
-                               pblock++;
-                       } while (bh != page_bufs);
-
-                       if (skip_page) {
-                               unlock_page(page);
-                               continue;
-                       }
-
-                       clear_page_dirty_for_io(page);
-                       err = ext4_bio_write_page(&io_submit, page, len,
-                                                 mpd->wbc);
-                       if (!err)
-                               mpd->pages_written++;
-                       /*
-                        * In error case, we have to continue because
-                        * remaining pages are still locked
-                        */
-                       if (ret == 0)
-                               ret = err;
-               }
-               pagevec_release(&pvec);
-       }
-       ext4_io_submit(&io_submit);
-       return ret;
-}
+       struct ext4_map_blocks map;
+       struct ext4_io_submit io_submit;        /* IO submission data */
+};
 
-static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
+static void mpage_release_unused_pages(struct mpage_da_data *mpd,
+                                      bool invalidate)
 {
        int nr_pages, i;
        pgoff_t index, end;
        struct pagevec pvec;
        struct inode *inode = mpd->inode;
        struct address_space *mapping = inode->i_mapping;
-       ext4_lblk_t start, last;
+
+       /* This is necessary when next_page == 0. */
+       if (mpd->first_page >= mpd->next_page)
+               return;
 
        index = mpd->first_page;
        end   = mpd->next_page - 1;
-
-       start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-       last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-       ext4_es_remove_extent(inode, start, last - start + 1);
+       if (invalidate) {
+               ext4_lblk_t start, last;
+               start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               ext4_es_remove_extent(inode, start, last - start + 1);
+       }
 
        pagevec_init(&pvec, 0);
        while (index <= end) {
@@ -1606,14 +1460,15 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
                                break;
                        BUG_ON(!PageLocked(page));
                        BUG_ON(PageWriteback(page));
-                       block_invalidatepage(page, 0);
-                       ClearPageUptodate(page);
+                       if (invalidate) {
+                               block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+                               ClearPageUptodate(page);
+                       }
                        unlock_page(page);
                }
                index = pvec.pages[nr_pages - 1]->index + 1;
                pagevec_release(&pvec);
        }
-       return;
 }
 
 static void ext4_print_free_blocks(struct inode *inode)
@@ -1642,215 +1497,6 @@ static void ext4_print_free_blocks(struct inode *inode)
        return;
 }
 
-/*
- * mpage_da_map_and_submit - go through given space, map them
- *       if necessary, and then submit them for I/O
- *
- * @mpd - bh describing space
- *
- * The function skips space we know is already mapped to disk blocks.
- *
- */
-static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
-{
-       int err, blks, get_blocks_flags;
-       struct ext4_map_blocks map, *mapp = NULL;
-       sector_t next = mpd->b_blocknr;
-       unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
-       loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
-       handle_t *handle = NULL;
-
-       /*
-        * If the blocks are mapped already, or we couldn't accumulate
-        * any blocks, then proceed immediately to the submission stage.
-        */
-       if ((mpd->b_size == 0) ||
-           ((mpd->b_state  & (1 << BH_Mapped)) &&
-            !(mpd->b_state & (1 << BH_Delay)) &&
-            !(mpd->b_state & (1 << BH_Unwritten))))
-               goto submit_io;
-
-       handle = ext4_journal_current_handle();
-       BUG_ON(!handle);
-
-       /*
-        * Call ext4_map_blocks() to allocate any delayed allocation
-        * blocks, or to convert an uninitialized extent to be
-        * initialized (in the case where we have written into
-        * one or more preallocated blocks).
-        *
-        * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
-        * indicate that we are on the delayed allocation path.  This
-        * affects functions in many different parts of the allocation
-        * call path.  This flag exists primarily because we don't
-        * want to change *many* call functions, so ext4_map_blocks()
-        * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
-        * inode's allocation semaphore is taken.
-        *
-        * If the blocks in questions were delalloc blocks, set
-        * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
-        * variables are updated after the blocks have been allocated.
-        */
-       map.m_lblk = next;
-       map.m_len = max_blocks;
-       /*
-        * We're in delalloc path and it is possible that we're going to
-        * need more metadata blocks than previously reserved. However
-        * we must not fail because we're in writeback and there is
-        * nothing we can do about it so it might result in data loss.
-        * So use reserved blocks to allocate metadata if possible.
-        */
-       get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
-                          EXT4_GET_BLOCKS_METADATA_NOFAIL;
-       if (ext4_should_dioread_nolock(mpd->inode))
-               get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
-       if (mpd->b_state & (1 << BH_Delay))
-               get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
-
-
-       blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
-       if (blks < 0) {
-               struct super_block *sb = mpd->inode->i_sb;
-
-               err = blks;
-               /*
-                * If get block returns EAGAIN or ENOSPC and there
-                * appears to be free blocks we will just let
-                * mpage_da_submit_io() unlock all of the pages.
-                */
-               if (err == -EAGAIN)
-                       goto submit_io;
-
-               if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
-                       mpd->retval = err;
-                       goto submit_io;
-               }
-
-               /*
-                * get block failure will cause us to loop in
-                * writepages, because a_ops->writepage won't be able
-                * to make progress. The page will be redirtied by
-                * writepage and writepages will again try to write
-                * the same.
-                */
-               if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
-                       ext4_msg(sb, KERN_CRIT,
-                                "delayed block allocation failed for inode %lu "
-                                "at logical offset %llu with max blocks %zd "
-                                "with error %d", mpd->inode->i_ino,
-                                (unsigned long long) next,
-                                mpd->b_size >> mpd->inode->i_blkbits, err);
-                       ext4_msg(sb, KERN_CRIT,
-                               "This should not happen!! Data will be lost");
-                       if (err == -ENOSPC)
-                               ext4_print_free_blocks(mpd->inode);
-               }
-               /* invalidate all the pages */
-               ext4_da_block_invalidatepages(mpd);
-
-               /* Mark this page range as having been completed */
-               mpd->io_done = 1;
-               return;
-       }
-       BUG_ON(blks == 0);
-
-       mapp = &map;
-       if (map.m_flags & EXT4_MAP_NEW) {
-               struct block_device *bdev = mpd->inode->i_sb->s_bdev;
-               int i;
-
-               for (i = 0; i < map.m_len; i++)
-                       unmap_underlying_metadata(bdev, map.m_pblk + i);
-       }
-
-       /*
-        * Update on-disk size along with block allocation.
-        */
-       disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
-       if (disksize > i_size_read(mpd->inode))
-               disksize = i_size_read(mpd->inode);
-       if (disksize > EXT4_I(mpd->inode)->i_disksize) {
-               ext4_update_i_disksize(mpd->inode, disksize);
-               err = ext4_mark_inode_dirty(handle, mpd->inode);
-               if (err)
-                       ext4_error(mpd->inode->i_sb,
-                                  "Failed to mark inode %lu dirty",
-                                  mpd->inode->i_ino);
-       }
-
-submit_io:
-       mpage_da_submit_io(mpd, mapp);
-       mpd->io_done = 1;
-}
-
-#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
-               (1 << BH_Delay) | (1 << BH_Unwritten))
-
-/*
- * mpage_add_bh_to_extent - try to add one more block to extent of blocks
- *
- * @mpd->lbh - extent of blocks
- * @logical - logical number of the block in the file
- * @b_state - b_state of the buffer head added
- *
- * the function is used to collect contig. blocks in same state
- */
-static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical,
-                                  unsigned long b_state)
-{
-       sector_t next;
-       int blkbits = mpd->inode->i_blkbits;
-       int nrblocks = mpd->b_size >> blkbits;
-
-       /*
-        * XXX Don't go larger than mballoc is willing to allocate
-        * This is a stopgap solution.  We eventually need to fold
-        * mpage_da_submit_io() into this function and then call
-        * ext4_map_blocks() multiple times in a loop
-        */
-       if (nrblocks >= (8*1024*1024 >> blkbits))
-               goto flush_it;
-
-       /* check if the reserved journal credits might overflow */
-       if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) {
-               if (nrblocks >= EXT4_MAX_TRANS_DATA) {
-                       /*
-                        * With non-extent format we are limited by the journal
-                        * credit available.  Total credit needed to insert
-                        * nrblocks contiguous blocks is dependent on the
-                        * nrblocks.  So limit nrblocks.
-                        */
-                       goto flush_it;
-               }
-       }
-       /*
-        * First block in the extent
-        */
-       if (mpd->b_size == 0) {
-               mpd->b_blocknr = logical;
-               mpd->b_size = 1 << blkbits;
-               mpd->b_state = b_state & BH_FLAGS;
-               return;
-       }
-
-       next = mpd->b_blocknr + nrblocks;
-       /*
-        * Can we merge the block to our big extent?
-        */
-       if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
-               mpd->b_size += 1 << blkbits;
-               return;
-       }
-
-flush_it:
-       /*
-        * We couldn't merge the block to our extent, so we
-        * need to flush current  extent and start new one
-        */
-       mpage_da_map_and_submit(mpd);
-       return;
-}
-
 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
 {
        return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
@@ -1883,6 +1529,8 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                  "logical block %lu\n", inode->i_ino, map->m_len,
                  (unsigned long) map->m_lblk);
 
+       ext4_es_lru_add(inode);
+
        /* Lookup extent status tree firstly */
        if (ext4_es_lookup_extent(inode, iblock, &es)) {
 
@@ -2156,7 +1804,7 @@ out:
  * lock so we have to do some magic.
  *
  * This function can get called via...
- *   - ext4_da_writepages after taking page lock (have journal handle)
+ *   - ext4_writepages after taking page lock (have journal handle)
  *   - journal_submit_inode_data_buffers (no journal handle)
  *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
  *   - grab_page_cache when doing write_begin (have journal handle)
@@ -2227,83 +1875,412 @@ static int ext4_writepage(struct page *page,
                }
        }
 
-       if (PageChecked(page) && ext4_should_journal_data(inode))
-               /*
-                * It's mmapped pagecache.  Add buffers and journal it.  There
-                * doesn't seem much point in redirtying the page here.
-                */
-               return __ext4_journalled_writepage(page, len);
+       if (PageChecked(page) && ext4_should_journal_data(inode))
+               /*
+                * It's mmapped pagecache.  Add buffers and journal it.  There
+                * doesn't seem much point in redirtying the page here.
+                */
+               return __ext4_journalled_writepage(page, len);
+
+       ext4_io_submit_init(&io_submit, wbc);
+       io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
+       if (!io_submit.io_end) {
+               redirty_page_for_writepage(wbc, page);
+               unlock_page(page);
+               return -ENOMEM;
+       }
+       ret = ext4_bio_write_page(&io_submit, page, len, wbc);
+       ext4_io_submit(&io_submit);
+       /* Drop io_end reference we got from init */
+       ext4_put_io_end_defer(io_submit.io_end);
+       return ret;
+}
+
+#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
+
+/*
+ * mballoc gives us at most this number of blocks...
+ * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
+ * The rest of mballoc seems to handle chunks up to the full group size.
+ */
+#define MAX_WRITEPAGES_EXTENT_LEN 2048
+
+/*
+ * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
+ *
+ * @mpd - extent of blocks
+ * @lblk - logical number of the block in the file
+ * @b_state - b_state of the buffer head added
+ *
+ * The function is used to collect contiguous blocks in the same state.
+ */
+static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
+                                 unsigned long b_state)
+{
+       struct ext4_map_blocks *map = &mpd->map;
+
+       /* Don't go larger than mballoc is willing to allocate */
+       if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
+               return 0;
+
+       /* First block in the extent? */
+       if (map->m_len == 0) {
+               map->m_lblk = lblk;
+               map->m_len = 1;
+               map->m_flags = b_state & BH_FLAGS;
+               return 1;
+       }
+
+       /* Can we merge the block to our big extent? */
+       if (lblk == map->m_lblk + map->m_len &&
+           (b_state & BH_FLAGS) == map->m_flags) {
+               map->m_len++;
+               return 1;
+       }
+       return 0;
+}
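The accumulation rule above grows the pending extent only while the incoming block is logically adjacent and carries the same BH_Delay/BH_Unwritten state, and never beyond what mballoc will allocate in one go. A minimal user-space sketch of that rule, using hypothetical names and a simplified extent struct rather than the kernel's types:

    #include <stdbool.h>

    /* Simplified stand-in for the accumulated extent; not the kernel's types. */
    struct sketch_extent {
            unsigned long lblk;     /* first logical block of the extent */
            unsigned int  len;      /* number of blocks accumulated so far */
            unsigned long state;    /* BH_Delay / BH_Unwritten style flags */
    };

    /* Try to absorb one more block; returns false when the caller must map
     * and submit the current extent before going on. */
    static bool sketch_add_block(struct sketch_extent *ex, unsigned long lblk,
                                 unsigned long state, unsigned int max_len)
    {
            if (ex->len >= max_len)         /* don't exceed what mballoc gives us */
                    return false;
            if (ex->len == 0) {             /* first block starts a new extent */
                    ex->lblk  = lblk;
                    ex->len   = 1;
                    ex->state = state;
                    return true;
            }
            if (lblk == ex->lblk + ex->len && state == ex->state) {
                    ex->len++;              /* contiguous and same state: merge */
                    return true;
            }
            return false;                   /* not mergeable */
    }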
+
+static bool add_page_bufs_to_extent(struct mpage_da_data *mpd,
+                                   struct buffer_head *head,
+                                   struct buffer_head *bh,
+                                   ext4_lblk_t lblk)
+{
+       struct inode *inode = mpd->inode;
+       ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
+                                                       >> inode->i_blkbits;
+
+       do {
+               BUG_ON(buffer_locked(bh));
+
+               if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
+                   (!buffer_delay(bh) && !buffer_unwritten(bh)) ||
+                   lblk >= blocks) {
+                       /* Found extent to map? */
+                       if (mpd->map.m_len)
+                               return false;
+                       if (lblk >= blocks)
+                               return true;
+                       continue;
+               }
+               if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state))
+                       return false;
+       } while (lblk++, (bh = bh->b_this_page) != head);
+       return true;
+}
+
+static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
+{
+       int len;
+       loff_t size = i_size_read(mpd->inode);
+       int err;
+
+       BUG_ON(page->index != mpd->first_page);
+       if (page->index == size >> PAGE_CACHE_SHIFT)
+               len = size & ~PAGE_CACHE_MASK;
+       else
+               len = PAGE_CACHE_SIZE;
+       clear_page_dirty_for_io(page);
+       err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
+       if (!err)
+               mpd->wbc->nr_to_write--;
+       mpd->first_page++;
+
+       return err;
+}
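A quick worked example of the partial-page length computed just above, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12): for a file with i_size == 10000 bytes, the final page has index 10000 >> 12 == 2 and len = 10000 & 4095 == 1808, so only the first 1808 bytes of that page are handed to ext4_bio_write_page(); every earlier page is written with the full PAGE_CACHE_SIZE.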
+
+/*
+ * mpage_map_and_submit_buffers - update buffers corresponding to changed
+ *                               extent and submit fully mapped pages for IO
+ *
+ * @mpd - description of extent to map, on return next extent to map
+ *
+ * Scan buffers corresponding to the changed extent (we expect the corresponding
+ * pages to already be locked) and update buffer state according to the new
+ * extent state.
+ * We map delalloc buffers to their physical location, clear unwritten bits,
+ * and mark buffers as uninit when we perform writes to uninitialized extents
+ * and do extent conversion after IO is finished. If the last page is not fully
+ * mapped, we update @map to the next extent in the last page that needs
+ * mapping. Otherwise we submit the page for IO.
+ */
+static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
+{
+       struct pagevec pvec;
+       int nr_pages, i;
+       struct inode *inode = mpd->inode;
+       struct buffer_head *head, *bh;
+       int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
+       ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
+                                                       >> inode->i_blkbits;
+       pgoff_t start, end;
+       ext4_lblk_t lblk;
+       sector_t pblock;
+       int err;
+
+       start = mpd->map.m_lblk >> bpp_bits;
+       end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
+       lblk = start << bpp_bits;
+       pblock = mpd->map.m_pblk;
+
+       pagevec_init(&pvec, 0);
+       while (start <= end) {
+               nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
+                                         PAGEVEC_SIZE);
+               if (nr_pages == 0)
+                       break;
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+
+                       if (page->index > end)
+                               break;
+                       /* Up to 'end' pages must be contiguous */
+                       BUG_ON(page->index != start);
+                       bh = head = page_buffers(page);
+                       do {
+                               if (lblk < mpd->map.m_lblk)
+                                       continue;
+                               if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
+                                       /*
+                                        * Buffer after end of mapped extent.
+                                        * Find next buffer in the page to map.
+                                        */
+                                       mpd->map.m_len = 0;
+                                       mpd->map.m_flags = 0;
+                                       add_page_bufs_to_extent(mpd, head, bh,
+                                                               lblk);
+                                       pagevec_release(&pvec);
+                                       return 0;
+                               }
+                               if (buffer_delay(bh)) {
+                                       clear_buffer_delay(bh);
+                                       bh->b_blocknr = pblock++;
+                               }
+                               clear_buffer_unwritten(bh);
+                       } while (++lblk < blocks &&
+                                (bh = bh->b_this_page) != head);
+
+                       /*
+                        * FIXME: This is going to break if dioread_nolock
+                        * supports blocksize < pagesize as we will try to
+                        * convert potentially unmapped parts of inode.
+                        */
+                       mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
+                       /* Page fully mapped - let IO run! */
+                       err = mpage_submit_page(mpd, page);
+                       if (err < 0) {
+                               pagevec_release(&pvec);
+                               return err;
+                       }
+                       start++;
+               }
+               pagevec_release(&pvec);
+       }
+       /* Extent fully mapped and matches with page boundary. We are done. */
+       mpd->map.m_len = 0;
+       mpd->map.m_flags = 0;
+       return 0;
+}
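To make the start/end arithmetic in mpage_map_and_submit_buffers() concrete, here is one hypothetical layout, assuming 1 KiB blocks on 4 KiB pages (bpp_bits == 2): a mapped extent with m_lblk == 10 and m_len == 8 covers logical blocks 10..17, so start = 10 >> 2 == 2 and end = 17 >> 2 == 4, i.e. pages 2 through 4. The buffer walk restarts at lblk = start << 2 == 8, and the lblk < mpd->map.m_lblk check skips blocks 8 and 9 of page 2 before delayed buffers start receiving pblock, pblock + 1, and so on.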
+
+static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
+{
+       struct inode *inode = mpd->inode;
+       struct ext4_map_blocks *map = &mpd->map;
+       int get_blocks_flags;
+       int err;
+
+       trace_ext4_da_write_pages_extent(inode, map);
+       /*
+        * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
+        * to convert an uninitialized extent to be initialized (in the case
+        * where we have written into one or more preallocated blocks).  It is
+        * possible that we're going to need more metadata blocks than
+        * previously reserved. However we must not fail because we're in
+        * writeback and there is nothing we can do about it so it might result
+        * in data loss.  So use reserved blocks to allocate metadata if
+        * possible.
+        *
+        * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks
+        * in question are delalloc blocks.  This affects functions in many
+        * different parts of the allocation call path.  This flag exists
+        * primarily because we don't want to change *many* call functions, so
+        * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag
+        * once the inode's allocation semaphore is taken.
+        */
+       get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
+                          EXT4_GET_BLOCKS_METADATA_NOFAIL;
+       if (ext4_should_dioread_nolock(inode))
+               get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
+       if (map->m_flags & (1 << BH_Delay))
+               get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
+       err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
+       if (err < 0)
+               return err;
+       if (map->m_flags & EXT4_MAP_UNINIT) {
+               if (!mpd->io_submit.io_end->handle &&
+                   ext4_handle_valid(handle)) {
+                       mpd->io_submit.io_end->handle = handle->h_rsv_handle;
+                       handle->h_rsv_handle = NULL;
+               }
+               ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
+       }
+
+       BUG_ON(map->m_len == 0);
+       if (map->m_flags & EXT4_MAP_NEW) {
+               struct block_device *bdev = inode->i_sb->s_bdev;
+               int i;
+
+               for (i = 0; i < map->m_len; i++)
+                       unmap_underlying_metadata(bdev, map->m_pblk + i);
+       }
+       return 0;
+}
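To spell out the flag composition above for one hypothetical case: for an extent of delayed-allocation buffers (BH_Delay set) on an inode using dioread_nolock, get_blocks_flags becomes EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_GET_BLOCKS_IO_CREATE_EXT | EXT4_GET_BLOCKS_DELALLOC_RESERVE; ext4_map_blocks() then allocates the range as an unwritten extent (dipping into reserved blocks for metadata if needed), and the resulting EXT4_MAP_UNINIT mapping makes the code hand the reserved handle over to the io_end for extent conversion once the IO completes.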
+
+/*
+ * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
+ *                              mpd->len and submit pages underlying it for IO
+ *
+ * @handle - handle for journal operations
+ * @mpd - extent to map
+ *
+ * The function maps the extent starting at mpd->lblk of length mpd->len. If it
+ * is delayed, blocks are allocated; if it is unwritten, we may need to convert
+ * them to initialized or split the described range from a larger unwritten
+ * extent. Note that we need not map the whole described range since allocation
+ * can return fewer blocks or the range may be covered by more unwritten extents.
+ * We cannot map more because we are limited by reserved transaction credits. On
+ * the other hand we always make sure that the last touched page is fully
+ * mapped so that it can be written out (and thus forward progress is
+ * guaranteed). After mapping we submit all mapped pages for IO.
+ */
+static int mpage_map_and_submit_extent(handle_t *handle,
+                                      struct mpage_da_data *mpd,
+                                      bool *give_up_on_write)
+{
+       struct inode *inode = mpd->inode;
+       struct ext4_map_blocks *map = &mpd->map;
+       int err;
+       loff_t disksize;
+
+       mpd->io_submit.io_end->offset =
+                               ((loff_t)map->m_lblk) << inode->i_blkbits;
+       while (map->m_len) {
+               err = mpage_map_one_extent(handle, mpd);
+               if (err < 0) {
+                       struct super_block *sb = inode->i_sb;
+
+                       if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+                               goto invalidate_dirty_pages;
+                       /*
+                        * Let the upper layers retry transient errors.
+                        * In the case of ENOSPC, if ext4_count_free_clusters()
+                        * is non-zero, a commit should free up blocks.
+                        */
+                       if ((err == -ENOMEM) ||
+                           (err == -ENOSPC && ext4_count_free_clusters(sb)))
+                               return err;
+                       ext4_msg(sb, KERN_CRIT,
+                                "Delayed block allocation failed for "
+                                "inode %lu at logical offset %llu with"
+                                " max blocks %u with error %d",
+                                inode->i_ino,
+                                (unsigned long long)map->m_lblk,
+                                (unsigned)map->m_len, -err);
+                       ext4_msg(sb, KERN_CRIT,
+                                "This should not happen!! Data will "
+                                "be lost\n");
+                       if (err == -ENOSPC)
+                               ext4_print_free_blocks(inode);
+               invalidate_dirty_pages:
+                       *give_up_on_write = true;
+                       return err;
+               }
+               /*
+                * Update buffer state, submit mapped pages, and get us new
+                * extent to map
+                */
+               err = mpage_map_and_submit_buffers(mpd);
+               if (err < 0)
+                       return err;
+       }
+
+       /* Update on-disk size after IO is submitted */
+       disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
+       if (disksize > i_size_read(inode))
+               disksize = i_size_read(inode);
+       if (disksize > EXT4_I(inode)->i_disksize) {
+               int err2;
 
-       memset(&io_submit, 0, sizeof(io_submit));
-       ret = ext4_bio_write_page(&io_submit, page, len, wbc);
-       ext4_io_submit(&io_submit);
-       return ret;
+               ext4_update_i_disksize(inode, disksize);
+               err2 = ext4_mark_inode_dirty(handle, inode);
+               if (err2)
+                       ext4_error(inode->i_sb,
+                                  "Failed to mark inode %lu dirty",
+                                  inode->i_ino);
+               if (!err)
+                       err = err2;
+       }
+       return err;
 }
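As an illustration of the i_disksize update at the end of mpage_map_and_submit_extent(), assuming 4 KiB pages: if the loop finished after submitting pages 0..4 of a file whose in-core i_size is 18000 bytes, mpd->first_page is 5, so the candidate disksize is 5 << 12 == 20480, which is clamped down to i_size == 18000; i_disksize is then raised only if it was previously smaller, and the inode is marked dirty within the still-running handle.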
 
 /*
- * This is called via ext4_da_writepages() to
- * calculate the total number of credits to reserve to fit
- * a single extent allocation into a single transaction,
- * ext4_da_writpeages() will loop calling this before
- * the block allocation.
+ * Calculate the total number of credits to reserve for one writepages
+ * iteration. This is called from ext4_writepages(). We map an extent of
+ * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then go on to finish mapping
+ * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
+ * bpp - 1 blocks in bpp different extents.
  */
-
 static int ext4_da_writepages_trans_blocks(struct inode *inode)
 {
-       int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
-
-       /*
-        * With non-extent format the journal credit needed to
-        * insert nrblocks contiguous block is dependent on
-        * number of contiguous block. So we will limit
-        * number of contiguous block to a sane value
-        */
-       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
-           (max_blocks > EXT4_MAX_TRANS_DATA))
-               max_blocks = EXT4_MAX_TRANS_DATA;
+       int bpp = ext4_journal_blocks_per_page(inode);
 
-       return ext4_chunk_trans_blocks(inode, max_blocks);
+       return ext4_meta_trans_blocks(inode,
+                               MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
 }
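A worked instance of the credit sizing above, assuming ext4_journal_blocks_per_page() evaluates to page size / block size: with 4 KiB blocks on 4 KiB pages, bpp == 1 and the transaction is sized for mapping MAX_WRITEPAGES_EXTENT_LEN + 0 == 2048 blocks in a single extent; with 1 KiB blocks, bpp == 4 and the reservation covers 2048 + 3 == 2051 blocks in up to 4 extents, which is what guarantees the last partially mapped page can always be finished.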
 
 /*
- * write_cache_pages_da - walk the list of dirty pages of the given
- * address space and accumulate pages that need writing, and call
- * mpage_da_map_and_submit to map a single contiguous memory region
- * and then write them.
+ * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
+ *                              and underlying extent to map
+ *
+ * @mpd - where to look for pages
+ *
+ * Walk dirty pages in the mapping. If they are fully mapped, submit them for
+ * IO immediately. When we find a page which isn't mapped, we start accumulating
+ * an extent of buffers underlying these pages that needs mapping (formed by
+ * either delayed or unwritten buffers). We also lock the pages containing
+ * these buffers. The extent found is returned in the @mpd structure (starting at
+ * mpd->lblk with length mpd->len blocks).
+ *
+ * Note that this function can attach bios to one io_end structure which are
+ * neither logically nor physically contiguous. Although it may seem like an
+ * unnecessary complication, it is actually inevitable in the blocksize < pagesize
+ * case as we need to track IO to all buffers underlying a page in one io_end.
  */
-static int write_cache_pages_da(handle_t *handle,
-                               struct address_space *mapping,
-                               struct writeback_control *wbc,
-                               struct mpage_da_data *mpd,
-                               pgoff_t *done_index)
+static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 {
-       struct buffer_head      *bh, *head;
-       struct inode            *inode = mapping->host;
-       struct pagevec          pvec;
-       unsigned int            nr_pages;
-       sector_t                logical;
-       pgoff_t                 index, end;
-       long                    nr_to_write = wbc->nr_to_write;
-       int                     i, tag, ret = 0;
-
-       memset(mpd, 0, sizeof(struct mpage_da_data));
-       mpd->wbc = wbc;
-       mpd->inode = inode;
-       pagevec_init(&pvec, 0);
-       index = wbc->range_start >> PAGE_CACHE_SHIFT;
-       end = wbc->range_end >> PAGE_CACHE_SHIFT;
+       struct address_space *mapping = mpd->inode->i_mapping;
+       struct pagevec pvec;
+       unsigned int nr_pages;
+       pgoff_t index = mpd->first_page;
+       pgoff_t end = mpd->last_page;
+       int tag;
+       int i, err = 0;
+       int blkbits = mpd->inode->i_blkbits;
+       ext4_lblk_t lblk;
+       struct buffer_head *head;
 
-       if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+       if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;
 
-       *done_index = index;
+       pagevec_init(&pvec, 0);
+       mpd->map.m_len = 0;
+       mpd->next_page = index;
        while (index <= end) {
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
-                       return 0;
+                       goto out;
 
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
@@ -2318,31 +2295,21 @@ static int write_cache_pages_da(handle_t *handle,
                        if (page->index > end)
                                goto out;
 
-                       *done_index = page->index + 1;
-
-                       /*
-                        * If we can't merge this page, and we have
-                        * accumulated an contiguous region, write it
-                        */
-                       if ((mpd->next_page != page->index) &&
-                           (mpd->next_page != mpd->first_page)) {
-                               mpage_da_map_and_submit(mpd);
-                               goto ret_extent_tail;
-                       }
+                       /* If we can't merge this page, we are done. */
+                       if (mpd->map.m_len > 0 && mpd->next_page != page->index)
+                               goto out;
 
                        lock_page(page);
-
                        /*
-                        * If the page is no longer dirty, or its
-                        * mapping no longer corresponds to inode we
-                        * are writing (which means it has been
-                        * truncated or invalidated), or the page is
-                        * already under writeback and we are not
-                        * doing a data integrity writeback, skip the page
+                        * If the page is no longer dirty, or its mapping no
+                        * longer corresponds to inode we are writing (which
+                        * means it has been truncated or invalidated), or the
+                        * page is already under writeback and we are not doing
+                        * a data integrity writeback, skip the page
                         */
                        if (!PageDirty(page) ||
                            (PageWriteback(page) &&
-                            (wbc->sync_mode == WB_SYNC_NONE)) ||
+                            (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
                            unlikely(page->mapping != mapping)) {
                                unlock_page(page);
                                continue;
@@ -2351,106 +2318,70 @@ static int write_cache_pages_da(handle_t *handle,
                        wait_on_page_writeback(page);
                        BUG_ON(PageWriteback(page));
 
-                       /*
-                        * If we have inline data and arrive here, it means that
-                        * we will soon create the block for the 1st page, so
-                        * we'd better clear the inline data here.
-                        */
-                       if (ext4_has_inline_data(inode)) {
-                               BUG_ON(ext4_test_inode_state(inode,
-                                               EXT4_STATE_MAY_INLINE_DATA));
-                               ext4_destroy_inline_data(handle, inode);
-                       }
-
-                       if (mpd->next_page != page->index)
+                       if (mpd->map.m_len == 0)
                                mpd->first_page = page->index;
                        mpd->next_page = page->index + 1;
-                       logical = (sector_t) page->index <<
-                               (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
                        /* Add all dirty buffers to mpd */
+                       lblk = ((ext4_lblk_t)page->index) <<
+                               (PAGE_CACHE_SHIFT - blkbits);
                        head = page_buffers(page);
-                       bh = head;
-                       do {
-                               BUG_ON(buffer_locked(bh));
-                               /*
-                                * We need to try to allocate unmapped blocks
-                                * in the same page.  Otherwise we won't make
-                                * progress with the page in ext4_writepage
-                                */
-                               if (ext4_bh_delay_or_unwritten(NULL, bh)) {
-                                       mpage_add_bh_to_extent(mpd, logical,
-                                                              bh->b_state);
-                                       if (mpd->io_done)
-                                               goto ret_extent_tail;
-                               } else if (buffer_dirty(bh) &&
-                                          buffer_mapped(bh)) {
-                                       /*
-                                        * mapped dirty buffer. We need to
-                                        * update the b_state because we look
-                                        * at b_state in mpage_da_map_blocks.
-                                        * We don't update b_size because if we
-                                        * find an unmapped buffer_head later
-                                        * we need to use the b_state flag of
-                                        * that buffer_head.
-                                        */
-                                       if (mpd->b_size == 0)
-                                               mpd->b_state =
-                                                       bh->b_state & BH_FLAGS;
-                               }
-                               logical++;
-                       } while ((bh = bh->b_this_page) != head);
-
-                       if (nr_to_write > 0) {
-                               nr_to_write--;
-                               if (nr_to_write == 0 &&
-                                   wbc->sync_mode == WB_SYNC_NONE)
-                                       /*
-                                        * We stop writing back only if we are
-                                        * not doing integrity sync. In case of
-                                        * integrity sync we have to keep going
-                                        * because someone may be concurrently
-                                        * dirtying pages, and we might have
-                                        * synced a lot of newly appeared dirty
-                                        * pages, but have not synced all of the
-                                        * old dirty pages.
-                                        */
+                       if (!add_page_bufs_to_extent(mpd, head, head, lblk))
+                               goto out;
+                       /* So far everything mapped? Submit the page for IO. */
+                       if (mpd->map.m_len == 0) {
+                               err = mpage_submit_page(mpd, page);
+                               if (err < 0)
                                        goto out;
                        }
+
+                       /*
+                        * Accumulated enough dirty pages? This doesn't apply
+                        * to WB_SYNC_ALL mode. For integrity sync we have to
+                        * keep going because someone may be concurrently
+                        * dirtying pages, and we might have synced a lot of
+                        * newly appeared dirty pages, but have not synced all
+                        * of the old dirty pages.
+                        */
+                       if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
+                           mpd->next_page - mpd->first_page >=
+                                                       mpd->wbc->nr_to_write)
+                               goto out;
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        return 0;
-ret_extent_tail:
-       ret = MPAGE_DA_EXTENT_TAIL;
 out:
        pagevec_release(&pvec);
-       cond_resched();
-       return ret;
+       return err;
 }
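One concrete reading of the early-exit check near the end of the scan loop, with hypothetical numbers: in WB_SYNC_NONE mode with wbc->nr_to_write == 16, the function stops accumulating once mpd->next_page - mpd->first_page reaches 16 and leaves the remaining dirty pages for a later writeback pass; in WB_SYNC_ALL mode the check is skipped entirely, so an integrity sync keeps scanning even if someone is concurrently dirtying pages.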
 
+static int __writepage(struct page *page, struct writeback_control *wbc,
+                      void *data)
+{
+       struct address_space *mapping = data;
+       int ret = ext4_writepage(page, wbc);
+       mapping_set_error(mapping, ret);
+       return ret;
+}
 
-static int ext4_da_writepages(struct address_space *mapping,
-                             struct writeback_control *wbc)
+static int ext4_writepages(struct address_space *mapping,
+                          struct writeback_control *wbc)
 {
-       pgoff_t index;
+       pgoff_t writeback_index = 0;
+       long nr_to_write = wbc->nr_to_write;
        int range_whole = 0;
+       int cycled = 1;
        handle_t *handle = NULL;
        struct mpage_da_data mpd;
        struct inode *inode = mapping->host;
-       int pages_written = 0;
-       unsigned int max_pages;
-       int range_cyclic, cycled = 1, io_done = 0;
-       int needed_blocks, ret = 0;
-       long desired_nr_to_write, nr_to_writebump = 0;
-       loff_t range_start = wbc->range_start;
+       int needed_blocks, rsv_blocks = 0, ret = 0;
        struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
-       pgoff_t done_index = 0;
-       pgoff_t end;
+       bool done;
        struct blk_plug plug;
+       bool give_up_on_write = false;
 
-       trace_ext4_da_writepages(inode, wbc);
+       trace_ext4_writepages(inode, wbc);
 
        /*
         * No pages to write? This is mainly a kludge to avoid starting
@@ -2460,164 +2391,165 @@ static int ext4_da_writepages(struct address_space *mapping,
        if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                return 0;
 
+       if (ext4_should_journal_data(inode)) {
+               struct blk_plug plug;
+               int ret;
+
+               blk_start_plug(&plug);
+               ret = write_cache_pages(mapping, wbc, __writepage, mapping);
+               blk_finish_plug(&plug);
+               return ret;
+       }
+
        /*
         * If the filesystem has aborted, it is read-only, so return
         * right away instead of dumping stack traces later on that
         * will obscure the real source of the problem.  We test
         * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
         * the latter could be true if the filesystem is mounted
-        * read-only, and in that case, ext4_da_writepages should
+        * read-only, and in that case, ext4_writepages should
         * *never* be called, so if that ever happens, we would want
         * the stack trace.
         */
        if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
                return -EROFS;
 
+       if (ext4_should_dioread_nolock(inode)) {
+               /*
+                * We may need to convert up to one extent per block in
+                * the page and we may dirty the inode.
+                */
+               rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
+       }
+
+       /*
+        * If we have inline data and arrive here, it means that
+        * we will soon create the block for the 1st page, so
+        * we'd better clear the inline data here.
+        */
+       if (ext4_has_inline_data(inode)) {
+               /* Just inode will be modified... */
+               handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
+               if (IS_ERR(handle)) {
+                       ret = PTR_ERR(handle);
+                       goto out_writepages;
+               }
+               BUG_ON(ext4_test_inode_state(inode,
+                               EXT4_STATE_MAY_INLINE_DATA));
+               ext4_destroy_inline_data(handle, inode);
+               ext4_journal_stop(handle);
+       }
+
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                range_whole = 1;
 
-       range_cyclic = wbc->range_cyclic;
        if (wbc->range_cyclic) {
-               index = mapping->writeback_index;
-               if (index)
+               writeback_index = mapping->writeback_index;
+               if (writeback_index)
                        cycled = 0;
-               wbc->range_start = index << PAGE_CACHE_SHIFT;
-               wbc->range_end  = LLONG_MAX;
-               wbc->range_cyclic = 0;
-               end = -1;
+               mpd.first_page = writeback_index;
+               mpd.last_page = -1;
        } else {
-               index = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
-       }
-
-       /*
-        * This works around two forms of stupidity.  The first is in
-        * the writeback code, which caps the maximum number of pages
-        * written to be 1024 pages.  This is wrong on multiple
-        * levels; different architectues have a different page size,
-        * which changes the maximum amount of data which gets
-        * written.  Secondly, 4 megabytes is way too small.  XFS
-        * forces this value to be 16 megabytes by multiplying
-        * nr_to_write parameter by four, and then relies on its
-        * allocator to allocate larger extents to make them
-        * contiguous.  Unfortunately this brings us to the second
-        * stupidity, which is that ext4's mballoc code only allocates
-        * at most 2048 blocks.  So we force contiguous writes up to
-        * the number of dirty blocks in the inode, or
-        * sbi->max_writeback_mb_bump whichever is smaller.
-        */
-       max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
-       if (!range_cyclic && range_whole) {
-               if (wbc->nr_to_write == LONG_MAX)
-                       desired_nr_to_write = wbc->nr_to_write;
-               else
-                       desired_nr_to_write = wbc->nr_to_write * 8;
-       } else
-               desired_nr_to_write = ext4_num_dirty_pages(inode, index,
-                                                          max_pages);
-       if (desired_nr_to_write > max_pages)
-               desired_nr_to_write = max_pages;
-
-       if (wbc->nr_to_write < desired_nr_to_write) {
-               nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
-               wbc->nr_to_write = desired_nr_to_write;
+               mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
+               mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
        }
 
+       mpd.inode = inode;
+       mpd.wbc = wbc;
+       ext4_io_submit_init(&mpd.io_submit, wbc);
 retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
-               tag_pages_for_writeback(mapping, index, end);
-
+               tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
+       done = false;
        blk_start_plug(&plug);
-       while (!ret && wbc->nr_to_write > 0) {
+       while (!done && mpd.first_page <= mpd.last_page) {
+               /* For each extent of pages we use new io_end */
+               mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
+               if (!mpd.io_submit.io_end) {
+                       ret = -ENOMEM;
+                       break;
+               }
 
                /*
-                * we  insert one extent at a time. So we need
-                * credit needed for single extent allocation.
-                * journalled mode is currently not supported
-                * by delalloc
+                * We have two constraints: we find one extent to map, and we
+                * must always write out the whole page (this matters when
+                * blocksize < pagesize) so that we don't block on IO when we
+                * try to write out the rest of the page. Journalled mode is
+                * not supported by delalloc.
                 */
                BUG_ON(ext4_should_journal_data(inode));
                needed_blocks = ext4_da_writepages_trans_blocks(inode);
 
-               /* start a new transaction*/
-               handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
-                                           needed_blocks);
+               /* start a new transaction */
+               handle = ext4_journal_start_with_reserve(inode,
+                               EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
                               "%ld pages, ino %lu; err %d", __func__,
                                wbc->nr_to_write, inode->i_ino, ret);
-                       blk_finish_plug(&plug);
-                       goto out_writepages;
+                       /* Release allocated io_end */
+                       ext4_put_io_end(mpd.io_submit.io_end);
+                       break;
                }
 
-               /*
-                * Now call write_cache_pages_da() to find the next
-                * contiguous region of logical blocks that need
-                * blocks to be allocated by ext4 and submit them.
-                */
-               ret = write_cache_pages_da(handle, mapping,
-                                          wbc, &mpd, &done_index);
-               /*
-                * If we have a contiguous extent of pages and we
-                * haven't done the I/O yet, map the blocks and submit
-                * them for I/O.
-                */
-               if (!mpd.io_done && mpd.next_page != mpd.first_page) {
-                       mpage_da_map_and_submit(&mpd);
-                       ret = MPAGE_DA_EXTENT_TAIL;
+               trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
+               ret = mpage_prepare_extent_to_map(&mpd);
+               if (!ret) {
+                       if (mpd.map.m_len)
+                               ret = mpage_map_and_submit_extent(handle, &mpd,
+                                       &give_up_on_write);
+                       else {
+                               /*
+                                * We scanned the whole range (or exhausted
+                                * nr_to_write), submitted what was mapped and
+                                * didn't find anything needing mapping. We are
+                                * done.
+                                */
+                               done = true;
+                       }
                }
-               trace_ext4_da_write_pages(inode, &mpd);
-               wbc->nr_to_write -= mpd.pages_written;
-
                ext4_journal_stop(handle);
-
-               if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
-                       /* commit the transaction which would
+               /* Submit prepared bio */
+               ext4_io_submit(&mpd.io_submit);
+               /* Unlock pages we didn't use */
+               mpage_release_unused_pages(&mpd, give_up_on_write);
+               /* Drop our io_end reference we got from init */
+               ext4_put_io_end(mpd.io_submit.io_end);
+
+               if (ret == -ENOSPC && sbi->s_journal) {
+                       /*
+                        * Commit the transaction which would
                         * free blocks released in the transaction
                         * and try again
                         */
                        jbd2_journal_force_commit_nested(sbi->s_journal);
                        ret = 0;
-               } else if (ret == MPAGE_DA_EXTENT_TAIL) {
-                       /*
-                        * Got one extent now try with rest of the pages.
-                        * If mpd.retval is set -EIO, journal is aborted.
-                        * So we don't need to write any more.
-                        */
-                       pages_written += mpd.pages_written;
-                       ret = mpd.retval;
-                       io_done = 1;
-               } else if (wbc->nr_to_write)
-                       /*
-                        * There is no more writeout needed
-                        * or we requested for a noblocking writeout
-                        * and we found the device congested
-                        */
+                       continue;
+               }
+               /* Fatal error - ENOMEM, EIO... */
+               if (ret)
                        break;
        }
        blk_finish_plug(&plug);
-       if (!io_done && !cycled) {
+       if (!ret && !cycled) {
                cycled = 1;
-               index = 0;
-               wbc->range_start = index << PAGE_CACHE_SHIFT;
-               wbc->range_end  = mapping->writeback_index - 1;
+               mpd.last_page = writeback_index - 1;
+               mpd.first_page = 0;
                goto retry;
        }
 
        /* Update index */
-       wbc->range_cyclic = range_cyclic;
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                /*
-                * set the writeback_index so that range_cyclic
+                * Set the writeback_index so that range_cyclic
                 * mode will write it back later
                 */
-               mapping->writeback_index = done_index;
+               mapping->writeback_index = mpd.first_page;
 
 out_writepages:
-       wbc->nr_to_write -= nr_to_writebump;
-       wbc->range_start = range_start;
-       trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
+       trace_ext4_writepages_result(inode, wbc, ret,
+                                    nr_to_write - wbc->nr_to_write);
        return ret;
 }
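For a concrete picture of the range_cyclic handling in ext4_writepages(), consider a hypothetical run where mapping->writeback_index was 100 at entry: the first pass sets mpd.first_page = 100 and mpd.last_page = -1 (end of file) with cycled == 0; if that pass completes without error, the code wraps around with cycled = 1, mpd.first_page = 0 and mpd.last_page = 99, giving the whole file one full scan before writeback_index is finally updated from mpd.first_page.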
 
@@ -2829,7 +2761,8 @@ static int ext4_da_write_end(struct file *file,
        return ret ? ret : copied;
 }
 
-static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
+static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
+                                  unsigned int length)
 {
        /*
         * Drop reserved blocks
@@ -2838,10 +2771,10 @@ static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
        if (!page_has_buffers(page))
                goto out;
 
-       ext4_da_page_release_reservation(page, offset);
+       ext4_da_page_release_reservation(page, offset, length);
 
 out:
-       ext4_invalidatepage(page, offset);
+       ext4_invalidatepage(page, offset, length);
 
        return;
 }
@@ -2864,7 +2797,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
         * laptop_mode, not even desirable).  However, to do otherwise
         * would require replicating code paths in:
         *
-        * ext4_da_writepages() ->
+        * ext4_writepages() ->
         *    write_cache_pages() ---> (via passed in callback function)
         *        __mpage_da_writepage() -->
         *           mpage_add_bh_to_extent()
@@ -2989,37 +2922,40 @@ ext4_readpages(struct file *file, struct address_space *mapping,
        return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
 }
 
-static void ext4_invalidatepage(struct page *page, unsigned long offset)
+static void ext4_invalidatepage(struct page *page, unsigned int offset,
+                               unsigned int length)
 {
-       trace_ext4_invalidatepage(page, offset);
+       trace_ext4_invalidatepage(page, offset, length);
 
        /* No journalling happens on data buffers when this function is used */
        WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
 
-       block_invalidatepage(page, offset);
+       block_invalidatepage(page, offset, length);
 }
 
 static int __ext4_journalled_invalidatepage(struct page *page,
-                                           unsigned long offset)
+                                           unsigned int offset,
+                                           unsigned int length)
 {
        journal_t *journal = EXT4_JOURNAL(page->mapping->host);
 
-       trace_ext4_journalled_invalidatepage(page, offset);
+       trace_ext4_journalled_invalidatepage(page, offset, length);
 
        /*
         * If it's a full truncate we just forget about the pending dirtying
         */
-       if (offset == 0)
+       if (offset == 0 && length == PAGE_CACHE_SIZE)
                ClearPageChecked(page);
 
-       return jbd2_journal_invalidatepage(journal, page, offset);
+       return jbd2_journal_invalidatepage(journal, page, offset, length);
 }
 
 /* Wrapper for aops... */
 static void ext4_journalled_invalidatepage(struct page *page,
-                                          unsigned long offset)
+                                          unsigned int offset,
+                                          unsigned int length)
 {
-       WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
+       WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
 }
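A short worked example of the offset/length convention these invalidatepage hooks now take, assuming 4 KiB pages: punching out bytes 512..2047 of a page invalidates it with offset == 512 and length == 1536, whereas a truncate at a page boundary passes offset == 0 and length == PAGE_CACHE_SIZE; only the latter, full-page case clears PageChecked in __ext4_journalled_invalidatepage(), since a partial invalidate can leave journalled dirty data elsewhere in the page.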
 
 static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -3067,9 +3003,13 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
        struct inode *inode = file_inode(iocb->ki_filp);
         ext4_io_end_t *io_end = iocb->private;
 
-       /* if not async direct IO or dio with 0 bytes write, just return */
-       if (!io_end || !size)
-               goto out;
+       /* if not async direct IO just return */
+       if (!io_end) {
+               inode_dio_done(inode);
+               if (is_async)
+                       aio_complete(iocb, ret, 0);
+               return;
+       }
 
        ext_debug("ext4_end_io_dio(): io_end 0x%p "
                  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3077,25 +3017,13 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                  size);
 
        iocb->private = NULL;
-
-       /* if not aio dio with unwritten extents, just free io and return */
-       if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
-               ext4_free_io_end(io_end);
-out:
-               inode_dio_done(inode);
-               if (is_async)
-                       aio_complete(iocb, ret, 0);
-               return;
-       }
-
        io_end->offset = offset;
        io_end->size = size;
        if (is_async) {
                io_end->iocb = iocb;
                io_end->result = ret;
        }
-
-       ext4_add_complete_io(io_end);
+       ext4_put_io_end_defer(io_end);
 }
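The io_end lifetime in this path is managed by plain reference counting: iocb->private holds one reference (taken with ext4_get_io_end() when the IO is set up), which the completion callback above drops via ext4_put_io_end_defer(), while the reference from ext4_init_io_end() is dropped by the submitting path with ext4_put_io_end(); whichever put releases the last reference performs the cleanup or kicks off deferred extent conversion. A generic, self-contained sketch of that pattern, with hypothetical names rather than the real ext4_io_end_t API (the kernel uses atomic counters, not a plain int):

    #include <stdlib.h>

    /* Illustrative completion object; names are made up for the sketch. */
    struct sketch_io_end {
            int refcount;                        /* kernel code uses an atomic */
            void (*complete)(struct sketch_io_end *);
    };

    static struct sketch_io_end *sketch_get(struct sketch_io_end *io)
    {
            io->refcount++;                      /* e.g. before handing it to the DIO layer */
            return io;
    }

    static void sketch_put(struct sketch_io_end *io)
    {
            if (--io->refcount == 0) {           /* last reference dropped */
                    if (io->complete)
                            io->complete(io);    /* run deferred completion work */
                    free(io);
            }
    }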
 
 /*
@@ -3129,6 +3057,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        get_block_t *get_block_func = NULL;
        int dio_flags = 0;
        loff_t final_size = offset + count;
+       ext4_io_end_t *io_end = NULL;
 
        /* Use the old path for reads and writes beyond i_size. */
        if (rw != WRITE || final_size > inode->i_size)
@@ -3136,11 +3065,18 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 
        BUG_ON(iocb->private == NULL);
 
+       /*
+        * Make all waiters for direct IO properly wait also for extent
+        * conversion. This also prevents a race between truncate() and
+        * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
+        */
+       if (rw == WRITE)
+               atomic_inc(&inode->i_dio_count);
+
        /* If we do a overwrite dio, i_mutex locking can be released */
        overwrite = *((int *)iocb->private);
 
        if (overwrite) {
-               atomic_inc(&inode->i_dio_count);
                down_read(&EXT4_I(inode)->i_data_sem);
                mutex_unlock(&inode->i_mutex);
        }
@@ -3167,13 +3103,16 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        iocb->private = NULL;
        ext4_inode_aio_set(inode, NULL);
        if (!is_sync_kiocb(iocb)) {
-               ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
+               io_end = ext4_init_io_end(inode, GFP_NOFS);
                if (!io_end) {
                        ret = -ENOMEM;
                        goto retake_lock;
                }
                io_end->flag |= EXT4_IO_END_DIRECT;
-               iocb->private = io_end;
+               /*
+                * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
+                */
+               iocb->private = ext4_get_io_end(io_end);
                /*
                 * we save the io structure for current async direct
                 * IO, so that later ext4_map_blocks() could flag the
@@ -3197,33 +3136,42 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                                   NULL,
                                   dio_flags);
 
-       if (iocb->private)
-               ext4_inode_aio_set(inode, NULL);
        /*
-        * The io_end structure takes a reference to the inode, that
-        * structure needs to be destroyed and the reference to the
-        * inode need to be dropped, when IO is complete, even with 0
-        * byte write, or failed.
-        *
-        * In the successful AIO DIO case, the io_end structure will
-        * be destroyed and the reference to the inode will be dropped
-        * after the end_io call back function is called.
-        *
-        * In the case there is 0 byte write, or error case, since VFS
-        * direct IO won't invoke the end_io call back function, we
-        * need to free the end_io structure here.
+        * Put our reference to io_end. This can free the io_end structure, e.g.
+        * in the sync IO case or on error. It can even perform extent
+        * conversion if all bios we submitted finished before we got here.
+        * Note that in that case iocb->private can be already set to NULL
+        * here.
         */
-       if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
-               ext4_free_io_end(iocb->private);
-               iocb->private = NULL;
-       } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
+       if (io_end) {
+               ext4_inode_aio_set(inode, NULL);
+               ext4_put_io_end(io_end);
+               /*
+                * When no IO was submitted ext4_end_io_dio() was not
+                * called so we have to put iocb's reference.
+                */
+               if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
+                       WARN_ON(iocb->private != io_end);
+                       WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
+                       WARN_ON(io_end->iocb);
+                       /*
+                        * Generic code already did inode_dio_done() so we
+                        * have to clear EXT4_IO_END_DIRECT to not do it for
+                        * the second time.
+                        */
+                       io_end->flag = 0;
+                       ext4_put_io_end(io_end);
+                       iocb->private = NULL;
+               }
+       }
+       if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
                                                EXT4_STATE_DIO_UNWRITTEN)) {
                int err;
                /*
                 * for non AIO case, since the IO is already
                 * completed, we could do the conversion right here
                 */
-               err = ext4_convert_unwritten_extents(inode,
+               err = ext4_convert_unwritten_extents(NULL, inode,
                                                     offset, ret);
                if (err < 0)
                        ret = err;
@@ -3231,9 +3179,10 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retake_lock:
+       if (rw == WRITE)
+               inode_dio_done(inode);
        /* take i_mutex locking again if we do an overwrite dio */
        if (overwrite) {
-               inode_dio_done(inode);
                up_read(&EXT4_I(inode)->i_data_sem);
                mutex_lock(&inode->i_mutex);
        }
@@ -3292,6 +3241,7 @@ static const struct address_space_operations ext4_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
+       .writepages             = ext4_writepages,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_write_end,
        .bmap                   = ext4_bmap,
@@ -3307,6 +3257,7 @@ static const struct address_space_operations ext4_journalled_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
+       .writepages             = ext4_writepages,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_journalled_write_end,
        .set_page_dirty         = ext4_journalled_set_page_dirty,
@@ -3322,7 +3273,7 @@ static const struct address_space_operations ext4_da_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
        .writepage              = ext4_writepage,
-       .writepages             = ext4_da_writepages,
+       .writepages             = ext4_writepages,
        .write_begin            = ext4_da_write_begin,
        .write_end              = ext4_da_write_end,
        .bmap                   = ext4_bmap,
@@ -3355,89 +3306,56 @@ void ext4_set_aops(struct inode *inode)
                inode->i_mapping->a_ops = &ext4_aops;
 }
 
-
 /*
- * ext4_discard_partial_page_buffers()
- * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
- * This function finds and locks the page containing the offset
- * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
- * Calling functions that already have the page locked should call
- * ext4_discard_partial_page_buffers_no_lock directly.
+ * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
+ * up to the end of the block which corresponds to `from'.
+ * This is required during truncate. We need to physically zero the tail end
+ * of that block so it doesn't yield old data if the file is later grown.
  */
-int ext4_discard_partial_page_buffers(handle_t *handle,
-               struct address_space *mapping, loff_t from,
-               loff_t length, int flags)
+int ext4_block_truncate_page(handle_t *handle,
+               struct address_space *mapping, loff_t from)
 {
+       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned length;
+       unsigned blocksize;
        struct inode *inode = mapping->host;
-       struct page *page;
-       int err = 0;
 
-       page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
-                                  mapping_gfp_mask(mapping) & ~__GFP_FS);
-       if (!page)
-               return -ENOMEM;
-
-       err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
-               from, length, flags);
+       blocksize = inode->i_sb->s_blocksize;
+       length = blocksize - (offset & (blocksize - 1));
 
-       unlock_page(page);
-       page_cache_release(page);
-       return err;
+       return ext4_block_zero_page_range(handle, mapping, from, length);
 }
 
 /*
- * ext4_discard_partial_page_buffers_no_lock()
- * Zeros a page range of length 'length' starting from offset 'from'.
- * Buffer heads that correspond to the block aligned regions of the
- * zeroed range will be unmapped.  Unblock aligned regions
- * will have the corresponding buffer head mapped if needed so that
- * that region of the page can be updated with the partial zero out.
- *
- * This function assumes that the page has already been  locked.  The
- * The range to be discarded must be contained with in the given page.
- * If the specified range exceeds the end of the page it will be shortened
- * to the end of the page that corresponds to 'from'.  This function is
- * appropriate for updating a page and it buffer heads to be unmapped and
- * zeroed for blocks that have been either released, or are going to be
- * released.
- *
- * handle: The journal handle
- * inode:  The files inode
- * page:   A locked page that contains the offset "from"
- * from:   The starting byte offset (from the beginning of the file)
- *         to begin discarding
- * len:    The length of bytes to discard
- * flags:  Optional flags that may be used:
- *
- *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
- *         Only zero the regions of the page whose buffer heads
- *         have already been unmapped.  This flag is appropriate
- *         for updating the contents of a page whose blocks may
- *         have already been released, and we only want to zero
- *         out the regions that correspond to those released blocks.
- *
- * Returns zero on success or negative on failure.
+ * ext4_block_zero_page_range() zeros out a mapping of length 'length'
+ * starting from file offset 'from'.  The range to be zeroed must
+ * be contained within one block.  If the specified range exceeds
+ * the end of the block it will be shortened to the end of the block
+ * that corresponds to 'from'.
  */
-static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
-               struct inode *inode, struct page *page, loff_t from,
-               loff_t length, int flags)
+int ext4_block_zero_page_range(handle_t *handle,
+               struct address_space *mapping, loff_t from, loff_t length)
 {
        ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-       unsigned int offset = from & (PAGE_CACHE_SIZE-1);
-       unsigned int blocksize, max, pos;
+       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned blocksize, max, pos;
        ext4_lblk_t iblock;
+       struct inode *inode = mapping->host;
        struct buffer_head *bh;
+       struct page *page;
        int err = 0;
 
-       blocksize = inode->i_sb->s_blocksize;
-       max = PAGE_CACHE_SIZE - offset;
+       page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+                                  mapping_gfp_mask(mapping) & ~__GFP_FS);
+       if (!page)
+               return -ENOMEM;
 
-       if (index != page->index)
-               return -EINVAL;
+       blocksize = inode->i_sb->s_blocksize;
+       max = blocksize - (offset & (blocksize - 1));
 
        /*
         * correct length if it does not fall between
-        * 'from' and the end of the page
+        * 'from' and the end of the block
         */
        if (length > max || length < 0)
                length = max;
@@ -3455,106 +3373,91 @@ static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
                iblock++;
                pos += blocksize;
        }
-
-       pos = offset;
-       while (pos < offset + length) {
-               unsigned int end_of_block, range_to_discard;
-
-               err = 0;
-
-               /* The length of space left to zero and unmap */
-               range_to_discard = offset + length - pos;
-
-               /* The length of space until the end of the block */
-               end_of_block = blocksize - (pos & (blocksize-1));
-
-               /*
-                * Do not unmap or zero past end of block
-                * for this buffer head
-                */
-               if (range_to_discard > end_of_block)
-                       range_to_discard = end_of_block;
-
-
-               /*
-                * Skip this buffer head if we are only zeroing unampped
-                * regions of the page
-                */
-               if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
-                       buffer_mapped(bh))
-                               goto next;
-
-               /* If the range is block aligned, unmap */
-               if (range_to_discard == blocksize) {
-                       clear_buffer_dirty(bh);
-                       bh->b_bdev = NULL;
-                       clear_buffer_mapped(bh);
-                       clear_buffer_req(bh);
-                       clear_buffer_new(bh);
-                       clear_buffer_delay(bh);
-                       clear_buffer_unwritten(bh);
-                       clear_buffer_uptodate(bh);
-                       zero_user(page, pos, range_to_discard);
-                       BUFFER_TRACE(bh, "Buffer discarded");
-                       goto next;
-               }
-
-               /*
-                * If this block is not completely contained in the range
-                * to be discarded, then it is not going to be released. Because
-                * we need to keep this block, we need to make sure this part
-                * of the page is uptodate before we modify it by writeing
-                * partial zeros on it.
-                */
+       if (buffer_freed(bh)) {
+               BUFFER_TRACE(bh, "freed: skip");
+               goto unlock;
+       }
+       if (!buffer_mapped(bh)) {
+               BUFFER_TRACE(bh, "unmapped");
+               ext4_get_block(inode, iblock, bh, 0);
+               /* unmapped? It's a hole - nothing to do */
                if (!buffer_mapped(bh)) {
-                       /*
-                        * Buffer head must be mapped before we can read
-                        * from the block
-                        */
-                       BUFFER_TRACE(bh, "unmapped");
-                       ext4_get_block(inode, iblock, bh, 0);
-                       /* unmapped? It's a hole - nothing to do */
-                       if (!buffer_mapped(bh)) {
-                               BUFFER_TRACE(bh, "still unmapped");
-                               goto next;
-                       }
+                       BUFFER_TRACE(bh, "still unmapped");
+                       goto unlock;
                }
+       }
 
-               /* Ok, it's mapped. Make sure it's up-to-date */
-               if (PageUptodate(page))
-                       set_buffer_uptodate(bh);
+       /* Ok, it's mapped. Make sure it's up-to-date */
+       if (PageUptodate(page))
+               set_buffer_uptodate(bh);
 
-               if (!buffer_uptodate(bh)) {
-                       err = -EIO;
-                       ll_rw_block(READ, 1, &bh);
-                       wait_on_buffer(bh);
-                       /* Uhhuh. Read error. Complain and punt.*/
-                       if (!buffer_uptodate(bh))
-                               goto next;
-               }
+       if (!buffer_uptodate(bh)) {
+               err = -EIO;
+               ll_rw_block(READ, 1, &bh);
+               wait_on_buffer(bh);
+               /* Uhhuh. Read error. Complain and punt. */
+               if (!buffer_uptodate(bh))
+                       goto unlock;
+       }
+       if (ext4_should_journal_data(inode)) {
+               BUFFER_TRACE(bh, "get write access");
+               err = ext4_journal_get_write_access(handle, bh);
+               if (err)
+                       goto unlock;
+       }
+       zero_user(page, offset, length);
+       BUFFER_TRACE(bh, "zeroed end of block");
 
-               if (ext4_should_journal_data(inode)) {
-                       BUFFER_TRACE(bh, "get write access");
-                       err = ext4_journal_get_write_access(handle, bh);
-                       if (err)
-                               goto next;
-               }
+       if (ext4_should_journal_data(inode)) {
+               err = ext4_handle_dirty_metadata(handle, inode, bh);
+       } else {
+               err = 0;
+               mark_buffer_dirty(bh);
+               if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
+                       err = ext4_jbd2_file_inode(handle, inode);
+       }
 
-               zero_user(page, pos, range_to_discard);
+unlock:
+       unlock_page(page);
+       page_cache_release(page);
+       return err;
+}
 
-               err = 0;
-               if (ext4_should_journal_data(inode)) {
-                       err = ext4_handle_dirty_metadata(handle, inode, bh);
-               } else
-                       mark_buffer_dirty(bh);
+int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
+                            loff_t lstart, loff_t length)
+{
+       struct super_block *sb = inode->i_sb;
+       struct address_space *mapping = inode->i_mapping;
+       unsigned partial_start, partial_end;
+       ext4_fsblk_t start, end;
+       loff_t byte_end = (lstart + length - 1);
+       int err = 0;
 
-               BUFFER_TRACE(bh, "Partial buffer zeroed");
-next:
-               bh = bh->b_this_page;
-               iblock++;
-               pos += range_to_discard;
-       }
+       partial_start = lstart & (sb->s_blocksize - 1);
+       partial_end = byte_end & (sb->s_blocksize - 1);
 
+       start = lstart >> sb->s_blocksize_bits;
+       end = byte_end >> sb->s_blocksize_bits;
+
+       /* Handle partial zero within the single block */
+       if (start == end &&
+           (partial_start || (partial_end != sb->s_blocksize - 1))) {
+               err = ext4_block_zero_page_range(handle, mapping,
+                                                lstart, length);
+               return err;
+       }
+       /* Handle partial zero out on the start of the range */
+       if (partial_start) {
+               err = ext4_block_zero_page_range(handle, mapping,
+                                                lstart, sb->s_blocksize);
+               if (err)
+                       return err;
+       }
+       /* Handle partial zero out on the end of the range */
+       if (partial_end != sb->s_blocksize - 1)
+               err = ext4_block_zero_page_range(handle, mapping,
+                                                byte_end - partial_end,
+                                                partial_end + 1);
        return err;
 }
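
A minimal user-space sketch of the splitting arithmetic used by ext4_zero_partial_blocks() above, assuming a power-of-two block size. The zero_range() stub is hypothetical and stands in for ext4_block_zero_page_range(), which in the kernel clamps the length to the containing block; the lengths here are pre-clamped for clarity.

#include <stdio.h>

/* Hypothetical stand-in for ext4_block_zero_page_range(): just report the request. */
static void zero_range(long long from, long long len)
{
	printf("zero %lld byte(s) at offset %lld\n", len, from);
}

/* Same splitting logic as ext4_zero_partial_blocks(), with lengths pre-clamped. */
static void zero_partial_blocks(long long lstart, long long length, unsigned blocksize)
{
	long long byte_end = lstart + length - 1;
	unsigned partial_start = lstart & (blocksize - 1);
	unsigned partial_end = byte_end & (blocksize - 1);
	long long start_blk = lstart / blocksize;
	long long end_blk = byte_end / blocksize;

	/* Whole range inside one block: a single partial zero-out and we are done. */
	if (start_blk == end_blk && (partial_start || partial_end != blocksize - 1)) {
		zero_range(lstart, length);
		return;
	}
	/* Partial block at the start of the range. */
	if (partial_start)
		zero_range(lstart, blocksize - partial_start);
	/* Partial block at the end of the range. */
	if (partial_end != blocksize - 1)
		zero_range(byte_end - partial_end, partial_end + 1);
	/* Blocks fully covered by the range are freed by the caller, not zeroed. */
}

int main(void)
{
	zero_partial_blocks(1000, 10000, 4096);	/* punch bytes 1000..10999 */
	return 0;
}

For the example range the sketch zeroes bytes 1000..4095 and 8192..10999; the fully covered blocks in between are left for the caller to free.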
 
@@ -3580,14 +3483,12 @@ int ext4_can_truncate(struct inode *inode)
  * Returns: 0 on success or negative on failure
  */
 
-int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 {
-       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        ext4_lblk_t first_block, stop_block;
        struct address_space *mapping = inode->i_mapping;
-       loff_t first_page, last_page, page_len;
-       loff_t first_page_offset, last_page_offset;
+       loff_t first_block_offset, last_block_offset;
        handle_t *handle;
        unsigned int credits;
        int ret = 0;
@@ -3638,23 +3539,16 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
                   offset;
        }
 
-       first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       last_page = (offset + length) >> PAGE_CACHE_SHIFT;
+       first_block_offset = round_up(offset, sb->s_blocksize);
+       last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
 
-       first_page_offset = first_page << PAGE_CACHE_SHIFT;
-       last_page_offset = last_page << PAGE_CACHE_SHIFT;
-
-       /* Now release the pages */
-       if (last_page_offset > first_page_offset) {
-               truncate_pagecache_range(inode, first_page_offset,
-                                        last_page_offset - 1);
-       }
+       /* Now release the pages and zero the block-aligned parts of pages */
+       if (last_block_offset > first_block_offset)
+               truncate_pagecache_range(inode, first_block_offset,
+                                        last_block_offset);
 
        /* Wait all existing dio workers, newcomers will block on i_mutex */
        ext4_inode_block_unlocked_dio(inode);
-       ret = ext4_flush_unwritten_io(inode);
-       if (ret)
-               goto out_dio;
        inode_dio_wait(inode);
 
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
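
For the hole-punching change above, a tiny worked example shows which block-aligned sub-range ends up dropped from the page cache. The macros are simplified power-of-two stand-ins for the kernel's round_up()/round_down(); the numbers are illustrative only.

#include <stdio.h>

/* Simplified power-of-two variants of the kernel's round_up()/round_down(). */
#define ROUND_UP(x, a)   (((x) + (long long)(a) - 1) & ~((long long)(a) - 1))
#define ROUND_DOWN(x, a) ((x) & ~((long long)(a) - 1))

int main(void)
{
	long long offset = 1000, length = 10000;	/* punch bytes 1000..10999 */
	long long blocksize = 4096;

	/* First byte of the first block lying fully inside the hole ... */
	long long first_block_offset = ROUND_UP(offset, blocksize);
	/* ... and the last byte of the last block lying fully inside the hole. */
	long long last_block_offset = ROUND_DOWN(offset + length, blocksize) - 1;

	/* Only this fully covered, block-aligned part is dropped from the page
	 * cache; the partial blocks at either end are zeroed instead. */
	if (last_block_offset > first_block_offset)
		printf("truncate_pagecache_range(%lld, %lld)\n",
		       first_block_offset, last_block_offset);
	return 0;
}

With a 4096-byte block size this prints truncate_pagecache_range(4096, 8191).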
@@ -3668,66 +3562,10 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
                goto out_dio;
        }
 
-       /*
-        * Now we need to zero out the non-page-aligned data in the
-        * pages at the start and tail of the hole, and unmap the
-        * buffer heads for the block aligned regions of the page that
-        * were completely zeroed.
-        */
-       if (first_page > last_page) {
-               /*
-                * If the file space being truncated is contained
-                * within a page just zero out and unmap the middle of
-                * that page
-                */
-               ret = ext4_discard_partial_page_buffers(handle,
-                       mapping, offset, length, 0);
-
-               if (ret)
-                       goto out_stop;
-       } else {
-               /*
-                * zero out and unmap the partial page that contains
-                * the start of the hole
-                */
-               page_len = first_page_offset - offset;
-               if (page_len > 0) {
-                       ret = ext4_discard_partial_page_buffers(handle, mapping,
-                                               offset, page_len, 0);
-                       if (ret)
-                               goto out_stop;
-               }
-
-               /*
-                * zero out and unmap the partial page that contains
-                * the end of the hole
-                */
-               page_len = offset + length - last_page_offset;
-               if (page_len > 0) {
-                       ret = ext4_discard_partial_page_buffers(handle, mapping,
-                                       last_page_offset, page_len, 0);
-                       if (ret)
-                               goto out_stop;
-               }
-       }
-
-       /*
-        * If i_size is contained in the last page, we need to
-        * unmap and zero the partial page after i_size
-        */
-       if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
-          inode->i_size % PAGE_CACHE_SIZE != 0) {
-               page_len = PAGE_CACHE_SIZE -
-                       (inode->i_size & (PAGE_CACHE_SIZE - 1));
-
-               if (page_len > 0) {
-                       ret = ext4_discard_partial_page_buffers(handle,
-                                       mapping, inode->i_size, page_len, 0);
-
-                       if (ret)
-                               goto out_stop;
-               }
-       }
+       ret = ext4_zero_partial_blocks(handle, inode, offset,
+                                      length);
+       if (ret)
+               goto out_stop;
 
        first_block = (offset + sb->s_blocksize - 1) >>
                EXT4_BLOCK_SIZE_BITS(sb);
@@ -3803,7 +3641,6 @@ void ext4_truncate(struct inode *inode)
        unsigned int credits;
        handle_t *handle;
        struct address_space *mapping = inode->i_mapping;
-       loff_t page_len;
 
        /*
         * There is a possibility that we're either freeing the inode
@@ -3830,12 +3667,6 @@ void ext4_truncate(struct inode *inode)
                        return;
        }
 
-       /*
-        * finish any pending end_io work so we won't run the risk of
-        * converting any truncated blocks to initialized later
-        */
-       ext4_flush_unwritten_io(inode);
-
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                credits = ext4_writepage_trans_blocks(inode);
        else
@@ -3847,14 +3678,8 @@ void ext4_truncate(struct inode *inode)
                return;
        }
 
-       if (inode->i_size % PAGE_CACHE_SIZE != 0) {
-               page_len = PAGE_CACHE_SIZE -
-                       (inode->i_size & (PAGE_CACHE_SIZE - 1));
-
-               if (ext4_discard_partial_page_buffers(handle,
-                               mapping, inode->i_size, page_len, 0))
-                       goto out_stop;
-       }
+       if (inode->i_size & (inode->i_sb->s_blocksize - 1))
+               ext4_block_truncate_page(handle, mapping, inode->i_size);
 
        /*
         * We add the inode to the orphan list, so that if this
@@ -4623,7 +4448,8 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
                                      inode->i_size >> PAGE_CACHE_SHIFT);
                if (!page)
                        return;
-               ret = __ext4_journalled_invalidatepage(page, offset);
+               ret = __ext4_journalled_invalidatepage(page, offset,
+                                               PAGE_CACHE_SIZE - offset);
                unlock_page(page);
                page_cache_release(page);
                if (ret != -EBUSY)
@@ -4805,7 +4631,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
                 struct kstat *stat)
 {
        struct inode *inode;
-       unsigned long delalloc_blocks;
+       unsigned long long delalloc_blocks;
 
        inode = dentry->d_inode;
        generic_fillattr(inode, stat);
@@ -4823,15 +4649,16 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
        delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
                                EXT4_I(inode)->i_reserved_data_blocks);
 
-       stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
+       stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
        return 0;
 }
 
-static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
+                                  int pextents)
 {
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-               return ext4_ind_trans_blocks(inode, nrblocks, chunk);
-       return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
+               return ext4_ind_trans_blocks(inode, lblocks);
+       return ext4_ext_index_trans_blocks(inode, pextents);
 }
 
 /*
@@ -4845,7 +4672,8 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
  *
  * Also account for superblock, inode, quota and xattr blocks
  */
-static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
+                                 int pextents)
 {
        ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
        int gdpblocks;
@@ -4853,14 +4681,10 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
        int ret = 0;
 
        /*
-        * How many index blocks need to touch to modify nrblocks?
-        * The "Chunk" flag indicating whether the nrblocks is
-        * physically contiguous on disk
-        *
-        * For Direct IO and fallocate, they calls get_block to allocate
-        * one single extent at a time, so they could set the "Chunk" flag
+        * How many index blocks do we need to touch to map @lblocks logical
+        * blocks to @pextents physical extents?
         */
-       idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
+       idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
 
        ret = idxblocks;
 
@@ -4868,12 +4692,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
         * Now let's see how many group bitmaps and group descriptors need
         * to account
         */
-       groups = idxblocks;
-       if (chunk)
-               groups += 1;
-       else
-               groups += nrblocks;
-
+       groups = idxblocks + pextents;
        gdpblocks = groups;
        if (groups > ngroups)
                groups = ngroups;
@@ -4904,7 +4723,7 @@ int ext4_writepage_trans_blocks(struct inode *inode)
        int bpp = ext4_journal_blocks_per_page(inode);
        int ret;
 
-       ret = ext4_meta_trans_blocks(inode, bpp, 0);
+       ret = ext4_meta_trans_blocks(inode, bpp, bpp);
 
        /* Account for data blocks for journalled mode */
        if (ext4_should_journal_data(inode))
index def84082a9a9b73deadc0d369c63a7ef981d8ea9..a9ff5e5137ca85306f408eda2cab09669962f603 100644 (file)
@@ -2105,6 +2105,7 @@ repeat:
                group = ac->ac_g_ex.fe_group;
 
                for (i = 0; i < ngroups; group++, i++) {
+                       cond_resched();
                        /*
                         * Artificially restricted ngroups for non-extent
                         * files makes group > ngroups possible on first loop.
@@ -4405,17 +4406,20 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 repeat:
                /* allocate space in core */
                *errp = ext4_mb_regular_allocator(ac);
-               if (*errp) {
-                       ext4_discard_allocated_blocks(ac);
-                       goto errout;
-               }
+               if (*errp)
+                       goto discard_and_exit;
 
                /* as we've just preallocated more space than
-                * user requested orinally, we store allocated
+                * user requested originally, we store allocated
                 * space in a special descriptor */
                if (ac->ac_status == AC_STATUS_FOUND &&
-                               ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
-                       ext4_mb_new_preallocation(ac);
+                   ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
+                       *errp = ext4_mb_new_preallocation(ac);
+               if (*errp) {
+               discard_and_exit:
+                       ext4_discard_allocated_blocks(ac);
+                       goto errout;
+               }
        }
        if (likely(ac->ac_status == AC_STATUS_FOUND)) {
                *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
@@ -4612,10 +4616,11 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
                BUG_ON(bh && (count > 1));
 
                for (i = 0; i < count; i++) {
+                       cond_resched();
                        if (!bh)
                                tbh = sb_find_get_block(inode->i_sb,
                                                        block + i);
-                       if (unlikely(!tbh))
+                       if (!tbh)
                                continue;
                        ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
                                    inode, tbh, block + i);
index 3dcbf364022fe286b1eb94afa16a50790e88bf29..e86dddbd8296c138347f9ba1c5f6e52d51a4c91c 100644 (file)
@@ -912,7 +912,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
        struct page *pagep[2] = {NULL, NULL};
        handle_t *handle;
        ext4_lblk_t orig_blk_offset;
-       long long offs = orig_page_offset << PAGE_CACHE_SHIFT;
        unsigned long blocksize = orig_inode->i_sb->s_blocksize;
        unsigned int w_flags = 0;
        unsigned int tmp_data_size, data_size, replaced_size;
@@ -940,8 +939,6 @@ again:
        orig_blk_offset = orig_page_offset * blocks_per_page +
                data_offset_in_page;
 
-       offs = (long long)orig_blk_offset << orig_inode->i_blkbits;
-
        /* Calculate data_size */
        if ((orig_blk_offset + block_len_in_page - 1) ==
            ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
index 6653fc35ecb7dda920377a7d45ab8efef2091b2d..ab2f6dc44b3abf88b62902433f48a1aa78ba8561 100644 (file)
@@ -918,11 +918,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
                                bh->b_data, bh->b_size,
                                (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
                                         + ((char *)de - bh->b_data))) {
-                       /* On error, skip the f_pos to the next block. */
-                       dir_file->f_pos = (dir_file->f_pos |
-                                       (dir->i_sb->s_blocksize - 1)) + 1;
-                       brelse(bh);
-                       return count;
+                       /* silently ignore the rest of the block */
+                       break;
                }
                ext4fs_dirhash(de->name, de->name_len, hinfo);
                if ((hinfo->hash < start_hash) ||
index 4acf1f78881b6c4c61aa10377a62ea4368106593..48786cdb5e6c8ccc9d94039d34432e8019c3d840 100644 (file)
@@ -46,46 +46,121 @@ void ext4_exit_pageio(void)
 }
 
 /*
- * This function is called by ext4_evict_inode() to make sure there is
- * no more pending I/O completion work left to do.
+ * Print a buffer I/O error compatible with fs/buffer.c.  This
+ * provides compatibility with dmesg scrapers that look for a specific
+ * buffer I/O error message.  We really need a unified error reporting
+ * structure to userspace ala Digital Unix's uerf system, but it's
+ * probably not going to happen in my lifetime, due to LKML politics...
  */
-void ext4_ioend_shutdown(struct inode *inode)
+static void buffer_io_error(struct buffer_head *bh)
 {
-       wait_queue_head_t *wq = ext4_ioend_wq(inode);
+       char b[BDEVNAME_SIZE];
+       printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
+                       bdevname(bh->b_bdev, b),
+                       (unsigned long long)bh->b_blocknr);
+}
 
-       wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
-       /*
-        * We need to make sure the work structure is finished being
-        * used before we let the inode get destroyed.
-        */
-       if (work_pending(&EXT4_I(inode)->i_unwritten_work))
-               cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
+static void ext4_finish_bio(struct bio *bio)
+{
+       int i;
+       int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+
+       for (i = 0; i < bio->bi_vcnt; i++) {
+               struct bio_vec *bvec = &bio->bi_io_vec[i];
+               struct page *page = bvec->bv_page;
+               struct buffer_head *bh, *head;
+               unsigned bio_start = bvec->bv_offset;
+               unsigned bio_end = bio_start + bvec->bv_len;
+               unsigned under_io = 0;
+               unsigned long flags;
+
+               if (!page)
+                       continue;
+
+               if (error) {
+                       SetPageError(page);
+                       set_bit(AS_EIO, &page->mapping->flags);
+               }
+               bh = head = page_buffers(page);
+               /*
+                * We check all buffers in the page under BH_Uptodate_Lock
+                * to avoid races with other end io clearing async_write flags
+                */
+               local_irq_save(flags);
+               bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
+               do {
+                       if (bh_offset(bh) < bio_start ||
+                           bh_offset(bh) + bh->b_size > bio_end) {
+                               if (buffer_async_write(bh))
+                                       under_io++;
+                               continue;
+                       }
+                       clear_buffer_async_write(bh);
+                       if (error)
+                               buffer_io_error(bh);
+               } while ((bh = bh->b_this_page) != head);
+               bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
+               local_irq_restore(flags);
+               if (!under_io)
+                       end_page_writeback(page);
+       }
 }
 
-void ext4_free_io_end(ext4_io_end_t *io)
+static void ext4_release_io_end(ext4_io_end_t *io_end)
 {
-       BUG_ON(!io);
-       BUG_ON(!list_empty(&io->list));
-       BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
+       struct bio *bio, *next_bio;
+
+       BUG_ON(!list_empty(&io_end->list));
+       BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
+       WARN_ON(io_end->handle);
 
-       if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
-               wake_up_all(ext4_ioend_wq(io->inode));
-       kmem_cache_free(io_end_cachep, io);
+       if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
+               wake_up_all(ext4_ioend_wq(io_end->inode));
+
+       for (bio = io_end->bio; bio; bio = next_bio) {
+               next_bio = bio->bi_private;
+               ext4_finish_bio(bio);
+               bio_put(bio);
+       }
+       if (io_end->flag & EXT4_IO_END_DIRECT)
+               inode_dio_done(io_end->inode);
+       if (io_end->iocb)
+               aio_complete(io_end->iocb, io_end->result, 0);
+       kmem_cache_free(io_end_cachep, io_end);
 }
 
-/* check a range of space and convert unwritten extents to written. */
+static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
+{
+       struct inode *inode = io_end->inode;
+
+       io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
+       /* Wake up anyone waiting on unwritten extent conversion */
+       if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
+               wake_up_all(ext4_ioend_wq(inode));
+}
+
+/*
+ * Check a range of space and convert unwritten extents to written. Note that
+ * we are protected from truncate touching the same part of the extent tree by
+ * the fact that truncate code waits for all DIO to finish (thus exclusion from
+ * direct IO is achieved) and also waits for PageWriteback bits. Thus we
+ * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
+ * completed (happens from ext4_free_ioend()).
+ */
 static int ext4_end_io(ext4_io_end_t *io)
 {
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        ssize_t size = io->size;
+       handle_t *handle = io->handle;
        int ret = 0;
 
        ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io, inode->i_ino, io->list.next, io->list.prev);
 
-       ret = ext4_convert_unwritten_extents(inode, offset, size);
+       io->handle = NULL;      /* Following call will use up the handle */
+       ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
        if (ret < 0) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
@@ -93,30 +168,22 @@ static int ext4_end_io(ext4_io_end_t *io)
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }
-       /* Wake up anyone waiting on unwritten extent conversion */
-       if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
-               wake_up_all(ext4_ioend_wq(inode));
-       if (io->flag & EXT4_IO_END_DIRECT)
-               inode_dio_done(inode);
-       if (io->iocb)
-               aio_complete(io->iocb, io->result, 0);
+       ext4_clear_io_unwritten_flag(io);
+       ext4_release_io_end(io);
        return ret;
 }
 
-static void dump_completed_IO(struct inode *inode)
+static void dump_completed_IO(struct inode *inode, struct list_head *head)
 {
 #ifdef EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;
 
-       if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
-               ext4_debug("inode %lu completed_io list is empty\n",
-                          inode->i_ino);
+       if (list_empty(head))
                return;
-       }
 
-       ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
-       list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
+       ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
+       list_for_each_entry(io, head, list) {
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
@@ -130,23 +197,30 @@ static void dump_completed_IO(struct inode *inode)
 }
 
 /* Add the io_end to per-inode completed end_io list. */
-void ext4_add_complete_io(ext4_io_end_t *io_end)
+static void ext4_add_complete_io(ext4_io_end_t *io_end)
 {
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct workqueue_struct *wq;
        unsigned long flags;
 
        BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
-       wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
-
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-       if (list_empty(&ei->i_completed_io_list))
-               queue_work(wq, &ei->i_unwritten_work);
-       list_add_tail(&io_end->list, &ei->i_completed_io_list);
+       if (io_end->handle) {
+               wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
+               if (list_empty(&ei->i_rsv_conversion_list))
+                       queue_work(wq, &ei->i_rsv_conversion_work);
+               list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
+       } else {
+               wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq;
+               if (list_empty(&ei->i_unrsv_conversion_list))
+                       queue_work(wq, &ei->i_unrsv_conversion_work);
+               list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list);
+       }
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 }
 
-static int ext4_do_flush_completed_IO(struct inode *inode)
+static int ext4_do_flush_completed_IO(struct inode *inode,
+                                     struct list_head *head)
 {
        ext4_io_end_t *io;
        struct list_head unwritten;
@@ -155,8 +229,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode)
        int err, ret = 0;
 
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-       dump_completed_IO(inode);
-       list_replace_init(&ei->i_completed_io_list, &unwritten);
+       dump_completed_IO(inode, head);
+       list_replace_init(head, &unwritten);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 
        while (!list_empty(&unwritten)) {
@@ -167,30 +241,25 @@ static int ext4_do_flush_completed_IO(struct inode *inode)
                err = ext4_end_io(io);
                if (unlikely(!ret && err))
                        ret = err;
-               io->flag &= ~EXT4_IO_END_UNWRITTEN;
-               ext4_free_io_end(io);
        }
        return ret;
 }
 
 /*
- * work on completed aio dio IO, to convert unwritten extents to extents
+ * work on completed IO, to convert unwritten extents to extents
  */
-void ext4_end_io_work(struct work_struct *work)
+void ext4_end_io_rsv_work(struct work_struct *work)
 {
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
-                                                 i_unwritten_work);
-       ext4_do_flush_completed_IO(&ei->vfs_inode);
+                                                 i_rsv_conversion_work);
+       ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
 }
 
-int ext4_flush_unwritten_io(struct inode *inode)
+void ext4_end_io_unrsv_work(struct work_struct *work)
 {
-       int ret;
-       WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
-                    !(inode->i_state & I_FREEING));
-       ret = ext4_do_flush_completed_IO(inode);
-       ext4_unwritten_wait(inode);
-       return ret;
+       struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
+                                                 i_unrsv_conversion_work);
+       ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list);
 }
 
 ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
@@ -200,83 +269,70 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
                INIT_LIST_HEAD(&io->list);
+               atomic_set(&io->count, 1);
        }
        return io;
 }
 
-/*
- * Print an buffer I/O error compatible with the fs/buffer.c.  This
- * provides compatibility with dmesg scrapers that look for a specific
- * buffer I/O error message.  We really need a unified error reporting
- * structure to userspace ala Digital Unix's uerf system, but it's
- * probably not going to happen in my lifetime, due to LKML politics...
- */
-static void buffer_io_error(struct buffer_head *bh)
+void ext4_put_io_end_defer(ext4_io_end_t *io_end)
 {
-       char b[BDEVNAME_SIZE];
-       printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
-                       bdevname(bh->b_bdev, b),
-                       (unsigned long long)bh->b_blocknr);
+       if (atomic_dec_and_test(&io_end->count)) {
+               if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
+                       ext4_release_io_end(io_end);
+                       return;
+               }
+               ext4_add_complete_io(io_end);
+       }
+}
+
+int ext4_put_io_end(ext4_io_end_t *io_end)
+{
+       int err = 0;
+
+       if (atomic_dec_and_test(&io_end->count)) {
+               if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+                       err = ext4_convert_unwritten_extents(io_end->handle,
+                                               io_end->inode, io_end->offset,
+                                               io_end->size);
+                       io_end->handle = NULL;
+                       ext4_clear_io_unwritten_flag(io_end);
+               }
+               ext4_release_io_end(io_end);
+       }
+       return err;
+}
+
+ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
+{
+       atomic_inc(&io_end->count);
+       return io_end;
 }
 
 static void ext4_end_bio(struct bio *bio, int error)
 {
        ext4_io_end_t *io_end = bio->bi_private;
-       struct inode *inode;
-       int i;
-       int blocksize;
        sector_t bi_sector = bio->bi_sector;
 
        BUG_ON(!io_end);
-       inode = io_end->inode;
-       blocksize = 1 << inode->i_blkbits;
-       bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = 0;
-       for (i = 0; i < bio->bi_vcnt; i++) {
-               struct bio_vec *bvec = &bio->bi_io_vec[i];
-               struct page *page = bvec->bv_page;
-               struct buffer_head *bh, *head;
-               unsigned bio_start = bvec->bv_offset;
-               unsigned bio_end = bio_start + bvec->bv_len;
-               unsigned under_io = 0;
-               unsigned long flags;
 
-               if (!page)
-                       continue;
-
-               if (error) {
-                       SetPageError(page);
-                       set_bit(AS_EIO, &page->mapping->flags);
-               }
-               bh = head = page_buffers(page);
+       if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                /*
-                * We check all buffers in the page under BH_Uptodate_Lock
-                * to avoid races with other end io clearing async_write flags
+                * Link bio into list hanging from io_end. We have to do it
+                * atomically as bio completions can be racing against each
+                * other.
                 */
-               local_irq_save(flags);
-               bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
-               do {
-                       if (bh_offset(bh) < bio_start ||
-                           bh_offset(bh) + blocksize > bio_end) {
-                               if (buffer_async_write(bh))
-                                       under_io++;
-                               continue;
-                       }
-                       clear_buffer_async_write(bh);
-                       if (error)
-                               buffer_io_error(bh);
-               } while ((bh = bh->b_this_page) != head);
-               bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
-               local_irq_restore(flags);
-               if (!under_io)
-                       end_page_writeback(page);
+               bio->bi_private = xchg(&io_end->bio, bio);
+       } else {
+               ext4_finish_bio(bio);
+               bio_put(bio);
        }
-       bio_put(bio);
 
        if (error) {
-               io_end->flag |= EXT4_IO_END_ERROR;
+               struct inode *inode = io_end->inode;
+
                ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
                             inode->i_ino,
@@ -285,13 +341,7 @@ static void ext4_end_bio(struct bio *bio, int error)
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
        }
-
-       if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
-               ext4_free_io_end(io_end);
-               return;
-       }
-
-       ext4_add_complete_io(io_end);
+       ext4_put_io_end_defer(io_end);
 }
 
 void ext4_io_submit(struct ext4_io_submit *io)
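
With unwritten-extent IO, ext4_end_bio() above no longer finishes buffers immediately; it chains the completed bio onto a list hanging off the io_end (reusing bi_private as the next pointer) with a single xchg(), and the list is only walked once the last io_end reference is dropped. A rough user-space analogue of that lock-free push, using C11 atomics in place of the kernel's xchg(), might look like this:

#include <stdatomic.h>
#include <stdio.h>

struct bio {			/* toy stand-in for struct bio */
	int id;
	struct bio *next;	/* plays the role of bio->bi_private */
};

/* Completed bios are pushed here; plays the role of io_end->bio. */
static _Atomic(struct bio *) completed_list;

/* Lock-free push: completions may race, so swap the list head atomically. */
static void push_completed(struct bio *bio)
{
	bio->next = atomic_exchange(&completed_list, bio);
}

int main(void)
{
	struct bio a = { .id = 1 }, b = { .id = 2 };

	push_completed(&a);
	push_completed(&b);

	/* Drain only after all IO is done, as ext4_release_io_end() does. */
	for (struct bio *bio = atomic_load(&completed_list); bio; bio = bio->next)
		printf("finish bio %d\n", bio->id);
	return 0;
}

As in the kernel code, the list is safe to traverse only after the last completion, since a concurrently pushed node may not have its next pointer set yet.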
@@ -305,43 +355,38 @@ void ext4_io_submit(struct ext4_io_submit *io)
                bio_put(io->io_bio);
        }
        io->io_bio = NULL;
-       io->io_op = 0;
+}
+
+void ext4_io_submit_init(struct ext4_io_submit *io,
+                        struct writeback_control *wbc)
+{
+       io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
+       io->io_bio = NULL;
        io->io_end = NULL;
 }
 
-static int io_submit_init(struct ext4_io_submit *io,
-                         struct inode *inode,
-                         struct writeback_control *wbc,
-                         struct buffer_head *bh)
+static int io_submit_init_bio(struct ext4_io_submit *io,
+                             struct buffer_head *bh)
 {
-       ext4_io_end_t *io_end;
-       struct page *page = bh->b_page;
        int nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio *bio;
 
-       io_end = ext4_init_io_end(inode, GFP_NOFS);
-       if (!io_end)
-               return -ENOMEM;
        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
+       if (!bio)
+               return -ENOMEM;
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
-       bio->bi_private = io->io_end = io_end;
        bio->bi_end_io = ext4_end_bio;
-
-       io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
-
+       bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
-       io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
        io->io_next_block = bh->b_blocknr;
        return 0;
 }
 
 static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct inode *inode,
-                           struct writeback_control *wbc,
                            struct buffer_head *bh)
 {
-       ext4_io_end_t *io_end;
        int ret;
 
        if (io->io_bio && bh->b_blocknr != io->io_next_block) {
@@ -349,18 +394,14 @@ submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
-               ret = io_submit_init(io, inode, wbc, bh);
+               ret = io_submit_init_bio(io, bh);
                if (ret)
                        return ret;
        }
-       io_end = io->io_end;
-       if (test_clear_buffer_uninit(bh))
-               ext4_set_io_unwritten_flag(inode, io_end);
-       io->io_end->size += bh->b_size;
-       io->io_next_block++;
        ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
+       io->io_next_block++;
        return 0;
 }
 
@@ -432,7 +473,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
        do {
                if (!buffer_async_write(bh))
                        continue;
-               ret = io_submit_add_bh(io, inode, wbc, bh);
+               ret = io_submit_add_bh(io, inode, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
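
Taken together, the page-io.c changes make the io_end lifetime reference counted: ext4_init_io_end() starts with one reference, each submitted bio (and, for direct IO, the iocb) takes another via ext4_get_io_end(), and the final ext4_put_io_end()/ext4_put_io_end_defer() performs or queues the unwritten-extent conversion and releases the structure. A bare-bones sketch of that get/put pattern, with illustrative names rather than the kernel API, is:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io_end {				/* toy analogue of ext4_io_end_t */
	atomic_int count;
	int unwritten;			/* needs extent conversion on last put */
};

static struct io_end *io_end_alloc(void)
{
	struct io_end *io = calloc(1, sizeof(*io));

	if (io)
		atomic_init(&io->count, 1);	/* creator's reference */
	return io;
}

static struct io_end *io_end_get(struct io_end *io)
{
	atomic_fetch_add(&io->count, 1);	/* e.g. one reference per submitted bio */
	return io;
}

static void io_end_put(struct io_end *io)
{
	if (atomic_fetch_sub(&io->count, 1) == 1) {	/* last reference dropped */
		if (io->unwritten)
			printf("convert unwritten extents, then release\n");
		free(io);
	}
}

int main(void)
{
	struct io_end *io = io_end_alloc();

	if (!io)
		return 1;
	io->unwritten = 1;
	io_end_get(io);		/* a bio was submitted */
	io_end_put(io);		/* submitter drops its reference */
	io_end_put(io);		/* bio completion drops the last reference */
	return 0;
}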
index b27c96d01965b97a1998b8542d31d9639a0b0161..c5adbb318a90c8c612c97dab1a47c0143b19ef77 100644 (file)
@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
        ext4_fsblk_t end = start + input->blocks_count;
        ext4_group_t group = input->group;
        ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
-       unsigned overhead = ext4_group_overhead_blocks(sb, group);
-       ext4_fsblk_t metaend = start + overhead;
+       unsigned overhead;
+       ext4_fsblk_t metaend;
        struct buffer_head *bh = NULL;
        ext4_grpblk_t free_blocks_count, offset;
        int err = -EINVAL;
 
+       if (group != sbi->s_groups_count) {
+               ext4_warning(sb, "Cannot add at group %u (only %u groups)",
+                            input->group, sbi->s_groups_count);
+               return -EINVAL;
+       }
+
+       overhead = ext4_group_overhead_blocks(sb, group);
+       metaend = start + overhead;
        input->free_blocks_count = free_blocks_count =
                input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
 
@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
                       free_blocks_count, input->reserved_blocks);
 
        ext4_get_group_no_and_offset(sb, start, NULL, &offset);
-       if (group != sbi->s_groups_count)
-               ext4_warning(sb, "Cannot add at group %u (only %u groups)",
-                            input->group, sbi->s_groups_count);
-       else if (offset != 0)
+       if (offset != 0)
                        ext4_warning(sb, "Last group not full");
        else if (input->reserved_blocks > input->blocks_count / 5)
                ext4_warning(sb, "Reserved blocks too high (%u)",
@@ -1551,11 +1556,10 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
        int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
                le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
        struct inode *inode = NULL;
-       int gdb_off, gdb_num;
+       int gdb_off;
        int err;
        __u16 bg_flags = 0;
 
-       gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
        gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
 
        if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
@@ -1656,12 +1660,10 @@ errout:
                err = err2;
 
        if (!err) {
-               ext4_fsblk_t first_block;
-               first_block = ext4_group_first_block_no(sb, 0);
                if (test_opt(sb, DEBUG))
                        printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
                               "blocks\n", ext4_blocks_count(es));
-               update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr - first_block,
+               update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
                               (char *)es, sizeof(struct ext4_super_block), 0);
        }
        return err;
index 94cc84db7c9aae349b44be229e993ec906a0a25a..85b3dd60169beba2b2535ae4342a436387273e32 100644 (file)
@@ -69,6 +69,7 @@ static void ext4_mark_recovery_complete(struct super_block *sb,
 static void ext4_clear_journal_err(struct super_block *sb,
                                   struct ext4_super_block *es);
 static int ext4_sync_fs(struct super_block *sb, int wait);
+static int ext4_sync_fs_nojournal(struct super_block *sb, int wait);
 static int ext4_remount(struct super_block *sb, int *flags, char *data);
 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
 static int ext4_unfreeze(struct super_block *sb);
@@ -398,6 +399,11 @@ static void ext4_handle_error(struct super_block *sb)
        }
        if (test_opt(sb, ERRORS_RO)) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
+               /*
+                * Make sure updated value of ->s_mount_flags will be visible
+                * before ->s_flags update
+                */
+               smp_wmb();
                sb->s_flags |= MS_RDONLY;
        }
        if (test_opt(sb, ERRORS_PANIC))
@@ -422,9 +428,9 @@ void __ext4_error(struct super_block *sb, const char *function,
        ext4_handle_error(sb);
 }
 
-void ext4_error_inode(struct inode *inode, const char *function,
-                     unsigned int line, ext4_fsblk_t block,
-                     const char *fmt, ...)
+void __ext4_error_inode(struct inode *inode, const char *function,
+                       unsigned int line, ext4_fsblk_t block,
+                       const char *fmt, ...)
 {
        va_list args;
        struct va_format vaf;
@@ -451,9 +457,9 @@ void ext4_error_inode(struct inode *inode, const char *function,
        ext4_handle_error(inode->i_sb);
 }
 
-void ext4_error_file(struct file *file, const char *function,
-                    unsigned int line, ext4_fsblk_t block,
-                    const char *fmt, ...)
+void __ext4_error_file(struct file *file, const char *function,
+                      unsigned int line, ext4_fsblk_t block,
+                      const char *fmt, ...)
 {
        va_list args;
        struct va_format vaf;
@@ -570,8 +576,13 @@ void __ext4_abort(struct super_block *sb, const char *function,
 
        if ((sb->s_flags & MS_RDONLY) == 0) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
-               sb->s_flags |= MS_RDONLY;
                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
+               /*
+                * Make sure updated value of ->s_mount_flags will be visible
+                * before ->s_flags update
+                */
+               smp_wmb();
+               sb->s_flags |= MS_RDONLY;
                if (EXT4_SB(sb)->s_journal)
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
                save_error_info(sb, function, line);
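
The smp_wmb() added in both ext4_handle_error() and __ext4_abort() orders the update of ->s_mount_flags before the store that flips MS_RDONLY in ->s_flags, so a task that observes the filesystem as read-only also observes the updated mount flags. A stripped-down user-space analogue, using C11 fences where the kernel uses smp_wmb() and a pairing read barrier, could look like this; the flag value and helper names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MF_FS_ABORTED	0x0004		/* illustrative flag value */

static atomic_int mount_flags;		/* stands in for sbi->s_mount_flags */
static atomic_bool read_only;		/* stands in for MS_RDONLY in sb->s_flags */

/* Writer side: publish the abort flag before flipping read-only. */
static void abort_fs(void)
{
	atomic_fetch_or_explicit(&mount_flags, MF_FS_ABORTED, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* plays the role of smp_wmb() */
	atomic_store_explicit(&read_only, true, memory_order_relaxed);
}

/* Reader side: once read-only is observed, the abort flag must be visible too. */
static bool abort_visible(void)
{
	if (!atomic_load_explicit(&read_only, memory_order_relaxed))
		return false;
	atomic_thread_fence(memory_order_acquire);	/* pairing read barrier */
	return atomic_load_explicit(&mount_flags, memory_order_relaxed) & MF_FS_ABORTED;
}

int main(void)
{
	abort_fs();
	printf("filesystem aborted and read-only: %s\n", abort_visible() ? "yes" : "no");
	return 0;
}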
@@ -580,7 +591,8 @@ void __ext4_abort(struct super_block *sb, const char *function,
                panic("EXT4-fs panic from previous error\n");
 }
 
-void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
+void __ext4_msg(struct super_block *sb,
+               const char *prefix, const char *fmt, ...)
 {
        struct va_format vaf;
        va_list args;
@@ -750,8 +762,10 @@ static void ext4_put_super(struct super_block *sb)
        ext4_unregister_li_request(sb);
        dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 
-       flush_workqueue(sbi->dio_unwritten_wq);
-       destroy_workqueue(sbi->dio_unwritten_wq);
+       flush_workqueue(sbi->unrsv_conversion_wq);
+       flush_workqueue(sbi->rsv_conversion_wq);
+       destroy_workqueue(sbi->unrsv_conversion_wq);
+       destroy_workqueue(sbi->rsv_conversion_wq);
 
        if (sbi->s_journal) {
                err = jbd2_journal_destroy(sbi->s_journal);
@@ -760,7 +774,7 @@ static void ext4_put_super(struct super_block *sb)
                        ext4_abort(sb, "Couldn't clean up the journal");
        }
 
-       ext4_es_unregister_shrinker(sb);
+       ext4_es_unregister_shrinker(sbi);
        del_timer(&sbi->s_err_report);
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
@@ -849,6 +863,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        rwlock_init(&ei->i_es_lock);
        INIT_LIST_HEAD(&ei->i_es_lru);
        ei->i_es_lru_nr = 0;
+       ei->i_touch_when = 0;
        ei->i_reserved_data_blocks = 0;
        ei->i_reserved_meta_blocks = 0;
        ei->i_allocated_meta_blocks = 0;
@@ -859,13 +874,15 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        ei->i_reserved_quota = 0;
 #endif
        ei->jinode = NULL;
-       INIT_LIST_HEAD(&ei->i_completed_io_list);
+       INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
+       INIT_LIST_HEAD(&ei->i_unrsv_conversion_list);
        spin_lock_init(&ei->i_completed_io_lock);
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
        atomic_set(&ei->i_ioend_count, 0);
        atomic_set(&ei->i_unwritten, 0);
-       INIT_WORK(&ei->i_unwritten_work, ext4_end_io_work);
+       INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
+       INIT_WORK(&ei->i_unrsv_conversion_work, ext4_end_io_unrsv_work);
 
        return &ei->vfs_inode;
 }
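
The two INIT_WORK() calls above attach the per-inode conversion handlers that run on the single-threaded rsv/unrsv workqueues created later in ext4_fill_super() (see the alloc_workqueue() hunk below). A hedged sketch of the producer side; the helper name is invented and the real submission site in the end-IO path is not shown in this hunk:

/* Hedged sketch: hand one reserved-conversion item to its ordered queue. */
static void sketch_queue_rsv_conversion(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* runs ext4_end_io_rsv_work() on a queue with max_active == 1 */
	queue_work(EXT4_SB(inode->i_sb)->rsv_conversion_wq,
		   &ei->i_rsv_conversion_work);
}

ext4_sync_fs() and ext4_put_super() then flush these queues (the flush_workqueue() calls in the hunks below) before anything is torn down.
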
@@ -1093,6 +1110,7 @@ static const struct super_operations ext4_nojournal_sops = {
        .dirty_inode    = ext4_dirty_inode,
        .drop_inode     = ext4_drop_inode,
        .evict_inode    = ext4_evict_inode,
+       .sync_fs        = ext4_sync_fs_nojournal,
        .put_super      = ext4_put_super,
        .statfs         = ext4_statfs,
        .remount_fs     = ext4_remount,
@@ -1908,7 +1926,6 @@ static int ext4_fill_flex_info(struct super_block *sb)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = NULL;
        ext4_group_t flex_group;
-       unsigned int groups_per_flex = 0;
        int i, err;
 
        sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
@@ -1916,7 +1933,6 @@ static int ext4_fill_flex_info(struct super_block *sb)
                sbi->s_log_groups_per_flex = 0;
                return 1;
        }
-       groups_per_flex = 1U << sbi->s_log_groups_per_flex;
 
        err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
        if (err)
@@ -2164,19 +2180,22 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
                dquot_initialize(inode);
                if (inode->i_nlink) {
-                       ext4_msg(sb, KERN_DEBUG,
-                               "%s: truncating inode %lu to %lld bytes",
-                               __func__, inode->i_ino, inode->i_size);
+                       if (test_opt(sb, DEBUG))
+                               ext4_msg(sb, KERN_DEBUG,
+                                       "%s: truncating inode %lu to %lld bytes",
+                                       __func__, inode->i_ino, inode->i_size);
                        jbd_debug(2, "truncating inode %lu to %lld bytes\n",
                                  inode->i_ino, inode->i_size);
                        mutex_lock(&inode->i_mutex);
+                       truncate_inode_pages(inode->i_mapping, inode->i_size);
                        ext4_truncate(inode);
                        mutex_unlock(&inode->i_mutex);
                        nr_truncates++;
                } else {
-                       ext4_msg(sb, KERN_DEBUG,
-                               "%s: deleting unreferenced inode %lu",
-                               __func__, inode->i_ino);
+                       if (test_opt(sb, DEBUG))
+                               ext4_msg(sb, KERN_DEBUG,
+                                       "%s: deleting unreferenced inode %lu",
+                                       __func__, inode->i_ino);
                        jbd_debug(2, "deleting unreferenced inode %lu\n",
                                  inode->i_ino);
                        nr_orphans++;
@@ -2377,7 +2396,10 @@ struct ext4_attr {
        ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *);
        ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
                         const char *, size_t);
-       int offset;
+       union {
+               int offset;
+               int deprecated_val;
+       } u;
 };
 
 static int parse_strtoull(const char *buf,
@@ -2446,7 +2468,7 @@ static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
 static ssize_t sbi_ui_show(struct ext4_attr *a,
                           struct ext4_sb_info *sbi, char *buf)
 {
-       unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
+       unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
 }
@@ -2455,7 +2477,7 @@ static ssize_t sbi_ui_store(struct ext4_attr *a,
                            struct ext4_sb_info *sbi,
                            const char *buf, size_t count)
 {
-       unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
+       unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset);
        unsigned long t;
        int ret;
 
@@ -2504,12 +2526,20 @@ static ssize_t trigger_test_error(struct ext4_attr *a,
        return count;
 }
 
+static ssize_t sbi_deprecated_show(struct ext4_attr *a,
+                                  struct ext4_sb_info *sbi, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%d\n", a->u.deprecated_val);
+}
+
 #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
 static struct ext4_attr ext4_attr_##_name = {                  \
        .attr = {.name = __stringify(_name), .mode = _mode },   \
        .show   = _show,                                        \
        .store  = _store,                                       \
-       .offset = offsetof(struct ext4_sb_info, _elname),       \
+       .u = {                                                  \
+               .offset = offsetof(struct ext4_sb_info, _elname),\
+       },                                                      \
 }
 #define EXT4_ATTR(name, mode, show, store) \
 static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
@@ -2520,6 +2550,14 @@ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
 #define EXT4_RW_ATTR_SBI_UI(name, elname)      \
        EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname)
 #define ATTR_LIST(name) &ext4_attr_##name.attr
+#define EXT4_DEPRECATED_ATTR(_name, _val)      \
+static struct ext4_attr ext4_attr_##_name = {                  \
+       .attr = {.name = __stringify(_name), .mode = 0444 },    \
+       .show   = sbi_deprecated_show,                          \
+       .u = {                                                  \
+               .deprecated_val = _val,                         \
+       },                                                      \
+}
 
 EXT4_RO_ATTR(delayed_allocation_blocks);
 EXT4_RO_ATTR(session_write_kbytes);
@@ -2534,7 +2572,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
 EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
-EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
+EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
 EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
 EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
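
For readability, the EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128) line above expands, per the macro defined earlier in this hunk, to roughly the following read-only attribute, so the sysfs file keeps existing but always reports the historical default instead of dereferencing an offset into struct ext4_sb_info:

static struct ext4_attr ext4_attr_max_writeback_mb_bump = {
	.attr	= { .name = "max_writeback_mb_bump", .mode = 0444 },
	.show	= sbi_deprecated_show,
	.u = {
		.deprecated_val = 128,
	},
};
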
 
@@ -3763,7 +3801,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_err_report.data = (unsigned long) sb;
 
        /* Register extent status tree shrinker */
-       ext4_es_register_shrinker(sb);
+       ext4_es_register_shrinker(sbi);
 
        err = percpu_counter_init(&sbi->s_freeclusters_counter,
                        ext4_count_free_clusters(sb));
@@ -3787,7 +3825,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        sbi->s_stripe = ext4_get_stripe_size(sbi);
-       sbi->s_max_writeback_mb_bump = 128;
        sbi->s_extent_max_zeroout_kb = 32;
 
        /*
@@ -3915,12 +3952,20 @@ no_journal:
         * The maximum number of concurrent works can be high and
         * concurrency isn't really necessary.  Limit it to 1.
         */
-       EXT4_SB(sb)->dio_unwritten_wq =
-               alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
-       if (!EXT4_SB(sb)->dio_unwritten_wq) {
-               printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
+       EXT4_SB(sb)->rsv_conversion_wq =
+               alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+       if (!EXT4_SB(sb)->rsv_conversion_wq) {
+               printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
                ret = -ENOMEM;
-               goto failed_mount_wq;
+               goto failed_mount4;
+       }
+
+       EXT4_SB(sb)->unrsv_conversion_wq =
+               alloc_workqueue("ext4-unrsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+       if (!EXT4_SB(sb)->unrsv_conversion_wq) {
+               printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
+               ret = -ENOMEM;
+               goto failed_mount4;
        }
 
        /*
@@ -4074,14 +4119,17 @@ failed_mount4a:
        sb->s_root = NULL;
 failed_mount4:
        ext4_msg(sb, KERN_ERR, "mount failed");
-       destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
+       if (EXT4_SB(sb)->rsv_conversion_wq)
+               destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
+       if (EXT4_SB(sb)->unrsv_conversion_wq)
+               destroy_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
 failed_mount_wq:
        if (sbi->s_journal) {
                jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
        }
 failed_mount3:
-       ext4_es_unregister_shrinker(sb);
+       ext4_es_unregister_shrinker(sbi);
        del_timer(&sbi->s_err_report);
        if (sbi->s_flex_groups)
                ext4_kvfree(sbi->s_flex_groups);
@@ -4517,19 +4565,52 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
 {
        int ret = 0;
        tid_t target;
+       bool needs_barrier = false;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
 
        trace_ext4_sync_fs(sb, wait);
-       flush_workqueue(sbi->dio_unwritten_wq);
+       flush_workqueue(sbi->rsv_conversion_wq);
+       flush_workqueue(sbi->unrsv_conversion_wq);
        /*
         * Writeback quota in non-journalled quota case - journalled quota has
         * no dirty dquots
         */
        dquot_writeback_dquots(sb, -1);
+       /*
+        * Data writeback is possible w/o a journal transaction, so a barrier
+        * must be sent at the end of the function. But we can skip it if the
+        * transaction commit will do it for us.
+        */
+       target = jbd2_get_latest_transaction(sbi->s_journal);
+       if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
+           !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
+               needs_barrier = true;
+
        if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
                if (wait)
-                       jbd2_log_wait_commit(sbi->s_journal, target);
+                       ret = jbd2_log_wait_commit(sbi->s_journal, target);
+       }
+       if (needs_barrier) {
+               int err;
+               err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+               if (!ret)
+                       ret = err;
        }
+
+       return ret;
+}
+
+static int ext4_sync_fs_nojournal(struct super_block *sb, int wait)
+{
+       int ret = 0;
+
+       trace_ext4_sync_fs(sb, wait);
+       flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
+       flush_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
+       dquot_writeback_dquots(sb, -1);
+       if (wait && test_opt(sb, BARRIER))
+               ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+
        return ret;
 }
 
index fd27e7e6326e61ddecee85c3435099aa12ba2dfd..e06e0995e00fdf619d1bbd7f48ebe0e9df25ebc0 100644 (file)
@@ -51,3 +51,15 @@ config F2FS_FS_POSIX_ACL
          Linux website <http://acl.bestbits.at/>.
 
          If you don't know what Access Control Lists are, say N
+
+config F2FS_FS_SECURITY
+       bool "F2FS Security Labels"
+       depends on F2FS_FS_XATTR
+       help
+         Security labels provide an access control facility to support Linux
+         Security Models (LSMs) such as AppArmor, SELinux, Smack and TOMOYO
+         Linux. This option enables an extended attribute handler for file
+         security labels in the f2fs filesystem; it requires the extended
+         attribute support (F2FS_FS_XATTR) to be enabled in advance.
+
+         If you are not using a security module, say N.
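
The new Kconfig option pairs with an f2fs_init_security() hook called from init_inode_metadata() later in this merge (see the dir.c hunk below) and with the extra ipage argument grown by f2fs_setxattr(). A hedged sketch of the conventional wiring, assuming the usual security_inode_init_security() callback pattern; the index constant and the exact f2fs plumbing are assumptions, not quotes from this patch:

/*
 * Hedged sketch of the standard security-label initialisation pattern;
 * F2FS_XATTR_INDEX_SECURITY and the setxattr plumbing are assumed here.
 */
static int sketch_initxattrs(struct inode *inode,
			     const struct xattr *xattr_array, void *fs_data)
{
	struct page *ipage = fs_data;
	const struct xattr *xattr;
	int err = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
				    xattr->name, xattr->value,
				    xattr->value_len, ipage);
		if (err)
			break;
	}
	return err;
}

static int sketch_init_security(struct inode *inode, struct inode *dir,
				const struct qstr *qstr, struct page *ipage)
{
	return security_inode_init_security(inode, dir, qstr,
					    sketch_initxattrs, ipage);
}
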
index 44abc2f286e00ad4ecff03e0293f80ab8d3d19fd..b7826ec1b47062fdb30891192b42a8dbf5e20788 100644 (file)
@@ -250,7 +250,7 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
                }
        }
 
-       error = f2fs_setxattr(inode, name_index, "", value, size);
+       error = f2fs_setxattr(inode, name_index, "", value, size, NULL);
 
        kfree(value);
        if (!error)
index b1de01da1a409b42ae5bc6aae4989c201c920639..66a6b85a51d8ab1a1724f10858648876b09f8b4b 100644 (file)
@@ -357,8 +357,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
        unsigned long blk_size = sbi->blocksize;
        struct f2fs_checkpoint *cp_block;
        unsigned long long cur_version = 0, pre_version = 0;
-       unsigned int crc = 0;
        size_t crc_offset;
+       __u32 crc = 0;
 
        /* Read the 1st cp block in this CP pack */
        cp_page_1 = get_meta_page(sbi, cp_addr);
@@ -369,7 +369,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
        if (crc_offset >= blk_size)
                goto invalid_cp1;
 
-       crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
+       crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp1;
 
@@ -384,7 +384,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
        if (crc_offset >= blk_size)
                goto invalid_cp2;
 
-       crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
+       crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp2;
 
@@ -450,13 +450,30 @@ fail_no_cp:
        return -EINVAL;
 }
 
-void set_dirty_dir_page(struct inode *inode, struct page *page)
+static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct list_head *head = &sbi->dir_inode_list;
-       struct dir_inode_entry *new;
        struct list_head *this;
 
+       list_for_each(this, head) {
+               struct dir_inode_entry *entry;
+               entry = list_entry(this, struct dir_inode_entry, list);
+               if (entry->inode == inode)
+                       return -EEXIST;
+       }
+       list_add_tail(&new->list, head);
+#ifdef CONFIG_F2FS_STAT_FS
+       sbi->n_dirty_dirs++;
+#endif
+       return 0;
+}
+
+void set_dirty_dir_page(struct inode *inode, struct page *page)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct dir_inode_entry *new;
+
        if (!S_ISDIR(inode->i_mode))
                return;
 retry:
@@ -469,23 +486,31 @@ retry:
        INIT_LIST_HEAD(&new->list);
 
        spin_lock(&sbi->dir_inode_lock);
-       list_for_each(this, head) {
-               struct dir_inode_entry *entry;
-               entry = list_entry(this, struct dir_inode_entry, list);
-               if (entry->inode == inode) {
-                       kmem_cache_free(inode_entry_slab, new);
-                       goto out;
-               }
-       }
-       list_add_tail(&new->list, head);
-       sbi->n_dirty_dirs++;
+       if (__add_dirty_inode(inode, new))
+               kmem_cache_free(inode_entry_slab, new);
 
-       BUG_ON(!S_ISDIR(inode->i_mode));
-out:
        inc_page_count(sbi, F2FS_DIRTY_DENTS);
        inode_inc_dirty_dents(inode);
        SetPagePrivate(page);
+       spin_unlock(&sbi->dir_inode_lock);
+}
 
+void add_dirty_dir_inode(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct dir_inode_entry *new;
+retry:
+       new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
+       if (!new) {
+               cond_resched();
+               goto retry;
+       }
+       new->inode = inode;
+       INIT_LIST_HEAD(&new->list);
+
+       spin_lock(&sbi->dir_inode_lock);
+       if (__add_dirty_inode(inode, new))
+               kmem_cache_free(inode_entry_slab, new);
        spin_unlock(&sbi->dir_inode_lock);
 }
 
@@ -499,8 +524,10 @@ void remove_dirty_dir_inode(struct inode *inode)
                return;
 
        spin_lock(&sbi->dir_inode_lock);
-       if (atomic_read(&F2FS_I(inode)->dirty_dents))
-               goto out;
+       if (atomic_read(&F2FS_I(inode)->dirty_dents)) {
+               spin_unlock(&sbi->dir_inode_lock);
+               return;
+       }
 
        list_for_each(this, head) {
                struct dir_inode_entry *entry;
@@ -508,12 +535,38 @@ void remove_dirty_dir_inode(struct inode *inode)
                if (entry->inode == inode) {
                        list_del(&entry->list);
                        kmem_cache_free(inode_entry_slab, entry);
+#ifdef CONFIG_F2FS_STAT_FS
                        sbi->n_dirty_dirs--;
+#endif
+                       break;
+               }
+       }
+       spin_unlock(&sbi->dir_inode_lock);
+
+       /* Only from the recovery routine */
+       if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
+               clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
+               iput(inode);
+       }
+}
+
+struct inode *check_dirty_dir_inode(struct f2fs_sb_info *sbi, nid_t ino)
+{
+       struct list_head *head = &sbi->dir_inode_list;
+       struct list_head *this;
+       struct inode *inode = NULL;
+
+       spin_lock(&sbi->dir_inode_lock);
+       list_for_each(this, head) {
+               struct dir_inode_entry *entry;
+               entry = list_entry(this, struct dir_inode_entry, list);
+               if (entry->inode->i_ino == ino) {
+                       inode = entry->inode;
                        break;
                }
        }
-out:
        spin_unlock(&sbi->dir_inode_lock);
+       return inode;
 }
 
 void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
@@ -595,7 +648,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        block_t start_blk;
        struct page *cp_page;
        unsigned int data_sum_blocks, orphan_blocks;
-       unsigned int crc32 = 0;
+       __u32 crc32 = 0;
        void *kaddr;
        int i;
 
@@ -664,8 +717,8 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
 
        crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
-       *(__le32 *)((unsigned char *)ckpt +
-                               le32_to_cpu(ckpt->checksum_offset))
+       *((__le32 *)((unsigned char *)ckpt +
+                               le32_to_cpu(ckpt->checksum_offset)))
                                = cpu_to_le32(crc32);
 
        start_blk = __start_cp_addr(sbi);
index 91ff93b0b0f403300f951f0d26fa2698ac535c9a..035f9a345cdf23446abbdaa26e8ac28340b715a0 100644 (file)
@@ -68,7 +68,9 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                                        struct buffer_head *bh_result)
 {
        struct f2fs_inode_info *fi = F2FS_I(inode);
+#ifdef CONFIG_F2FS_STAT_FS
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+#endif
        pgoff_t start_fofs, end_fofs;
        block_t start_blkaddr;
 
@@ -78,7 +80,9 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                return 0;
        }
 
+#ifdef CONFIG_F2FS_STAT_FS
        sbi->total_hit_ext++;
+#endif
        start_fofs = fi->ext.fofs;
        end_fofs = fi->ext.fofs + fi->ext.len - 1;
        start_blkaddr = fi->ext.blk_addr;
@@ -96,7 +100,9 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                else
                        bh_result->b_size = UINT_MAX;
 
+#ifdef CONFIG_F2FS_STAT_FS
                sbi->read_hit_ext++;
+#endif
                read_unlock(&fi->ext.ext_lock);
                return 1;
        }
@@ -199,7 +205,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
        if (dn.data_blkaddr == NEW_ADDR)
                return ERR_PTR(-EINVAL);
 
-       page = grab_cache_page(mapping, index);
+       page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
        if (!page)
                return ERR_PTR(-ENOMEM);
 
@@ -233,18 +239,23 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
        struct page *page;
        int err;
 
+repeat:
+       page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
+       if (!page)
+               return ERR_PTR(-ENOMEM);
+
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
-       if (err)
+       if (err) {
+               f2fs_put_page(page, 1);
                return ERR_PTR(err);
+       }
        f2fs_put_dnode(&dn);
 
-       if (dn.data_blkaddr == NULL_ADDR)
+       if (dn.data_blkaddr == NULL_ADDR) {
+               f2fs_put_page(page, 1);
                return ERR_PTR(-ENOENT);
-repeat:
-       page = grab_cache_page(mapping, index);
-       if (!page)
-               return ERR_PTR(-ENOMEM);
+       }
 
        if (PageUptodate(page))
                return page;
@@ -274,9 +285,10 @@ repeat:
  *
  * Also, caller should grab and release a mutex by calling mutex_lock_op() and
  * mutex_unlock_op().
+ * Note that npage is set only by make_empty_dir.
  */
-struct page *get_new_data_page(struct inode *inode, pgoff_t index,
-                                               bool new_i_size)
+struct page *get_new_data_page(struct inode *inode,
+               struct page *npage, pgoff_t index, bool new_i_size)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct address_space *mapping = inode->i_mapping;
@@ -284,18 +296,20 @@ struct page *get_new_data_page(struct inode *inode, pgoff_t index,
        struct dnode_of_data dn;
        int err;
 
-       set_new_dnode(&dn, inode, NULL, NULL, 0);
+       set_new_dnode(&dn, inode, npage, npage, 0);
        err = get_dnode_of_data(&dn, index, ALLOC_NODE);
        if (err)
                return ERR_PTR(err);
 
        if (dn.data_blkaddr == NULL_ADDR) {
                if (reserve_new_block(&dn)) {
-                       f2fs_put_dnode(&dn);
+                       if (!npage)
+                               f2fs_put_dnode(&dn);
                        return ERR_PTR(-ENOSPC);
                }
        }
-       f2fs_put_dnode(&dn);
+       if (!npage)
+               f2fs_put_dnode(&dn);
 repeat:
        page = grab_cache_page(mapping, index);
        if (!page)
@@ -325,6 +339,8 @@ repeat:
        if (new_i_size &&
                i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
                i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
+               /* Only the directory inode sets new_i_size */
+               set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
                mark_inode_dirty_sync(inode);
        }
        return page;
@@ -481,8 +497,9 @@ int do_write_data_page(struct page *page)
         * If current allocation needs SSR,
         * it had better in-place writes for updated data.
         */
-       if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
-                               need_inplace_update(inode)) {
+       if (unlikely(old_blk_addr != NEW_ADDR &&
+                       !is_cold_data(page) &&
+                       need_inplace_update(inode))) {
                rewrite_data_page(F2FS_SB(inode->i_sb), page,
                                                old_blk_addr);
        } else {
@@ -684,6 +701,27 @@ err:
        return err;
 }
 
+static int f2fs_write_end(struct file *file,
+                       struct address_space *mapping,
+                       loff_t pos, unsigned len, unsigned copied,
+                       struct page *page, void *fsdata)
+{
+       struct inode *inode = page->mapping->host;
+
+       SetPageUptodate(page);
+       set_page_dirty(page);
+
+       if (pos + copied > i_size_read(inode)) {
+               i_size_write(inode, pos + copied);
+               mark_inode_dirty(inode);
+               update_inode_page(inode);
+       }
+
+       unlock_page(page);
+       page_cache_release(page);
+       return copied;
+}
+
 static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
                const struct iovec *iov, loff_t offset, unsigned long nr_segs)
 {
@@ -698,7 +736,8 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
                                                  get_data_block_ro);
 }
 
-static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
+static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
+                                     unsigned int length)
 {
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -740,7 +779,7 @@ const struct address_space_operations f2fs_dblock_aops = {
        .writepage      = f2fs_write_data_page,
        .writepages     = f2fs_write_data_pages,
        .write_begin    = f2fs_write_begin,
-       .write_end      = nobh_write_end,
+       .write_end      = f2fs_write_end,
        .set_page_dirty = f2fs_set_data_page_dirty,
        .invalidatepage = f2fs_invalidate_data_page,
        .releasepage    = f2fs_release_data_page,
index 8d9943786c318effc179b281ea5d4d34947033ff..0d6c6aafb235b140b3ba44ab7245ba37212b2a27 100644 (file)
@@ -175,12 +175,12 @@ get_cache:
 
 static int stat_show(struct seq_file *s, void *v)
 {
-       struct f2fs_stat_info *si, *next;
+       struct f2fs_stat_info *si;
        int i = 0;
        int j;
 
        mutex_lock(&f2fs_stat_mutex);
-       list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
+       list_for_each_entry(si, &f2fs_stat_list, stat_list) {
                char devname[BDEVNAME_SIZE];
 
                update_general_status(si->sbi);
index 1ac6b93036b7a23980a66d4f6987a82c34ff32df..9d1cd423450d1ba12049a987a0e5478424a33850 100644 (file)
@@ -13,6 +13,7 @@
 #include "f2fs.h"
 #include "node.h"
 #include "acl.h"
+#include "xattr.h"
 
 static unsigned long dir_blocks(struct inode *inode)
 {
@@ -215,9 +216,9 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
 
 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
 {
-       struct page *page = NULL;
-       struct f2fs_dir_entry *de = NULL;
-       struct f2fs_dentry_block *dentry_blk = NULL;
+       struct page *page;
+       struct f2fs_dir_entry *de;
+       struct f2fs_dentry_block *dentry_blk;
 
        page = get_lock_data_page(dir, 0);
        if (IS_ERR(page))
@@ -264,15 +265,10 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
        f2fs_put_page(page, 1);
 }
 
-void init_dent_inode(const struct qstr *name, struct page *ipage)
+static void init_dent_inode(const struct qstr *name, struct page *ipage)
 {
        struct f2fs_node *rn;
 
-       if (IS_ERR(ipage))
-               return;
-
-       wait_on_page_writeback(ipage);
-
        /* copy name info. to this inode page */
        rn = (struct f2fs_node *)page_address(ipage);
        rn->i.i_namelen = cpu_to_le32(name->len);
@@ -280,14 +276,15 @@ void init_dent_inode(const struct qstr *name, struct page *ipage)
        set_page_dirty(ipage);
 }
 
-static int make_empty_dir(struct inode *inode, struct inode *parent)
+static int make_empty_dir(struct inode *inode,
+               struct inode *parent, struct page *page)
 {
        struct page *dentry_page;
        struct f2fs_dentry_block *dentry_blk;
        struct f2fs_dir_entry *de;
        void *kaddr;
 
-       dentry_page = get_new_data_page(inode, 0, true);
+       dentry_page = get_new_data_page(inode, page, 0, true);
        if (IS_ERR(dentry_page))
                return PTR_ERR(dentry_page);
 
@@ -317,63 +314,76 @@ static int make_empty_dir(struct inode *inode, struct inode *parent)
        return 0;
 }
 
-static int init_inode_metadata(struct inode *inode,
+static struct page *init_inode_metadata(struct inode *inode,
                struct inode *dir, const struct qstr *name)
 {
+       struct page *page;
+       int err;
+
        if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
-               int err;
-               err = new_inode_page(inode, name);
-               if (err)
-                       return err;
+               page = new_inode_page(inode, name);
+               if (IS_ERR(page))
+                       return page;
 
                if (S_ISDIR(inode->i_mode)) {
-                       err = make_empty_dir(inode, dir);
-                       if (err) {
-                               remove_inode_page(inode);
-                               return err;
-                       }
+                       err = make_empty_dir(inode, dir, page);
+                       if (err)
+                               goto error;
                }
 
                err = f2fs_init_acl(inode, dir);
-               if (err) {
-                       remove_inode_page(inode);
-                       return err;
-               }
+               if (err)
+                       goto error;
+
+               err = f2fs_init_security(inode, dir, name, page);
+               if (err)
+                       goto error;
+
+               wait_on_page_writeback(page);
        } else {
-               struct page *ipage;
-               ipage = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
-               if (IS_ERR(ipage))
-                       return PTR_ERR(ipage);
-               set_cold_node(inode, ipage);
-               init_dent_inode(name, ipage);
-               f2fs_put_page(ipage, 1);
+               page = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
+               if (IS_ERR(page))
+                       return page;
+
+               wait_on_page_writeback(page);
+               set_cold_node(inode, page);
        }
+
+       init_dent_inode(name, page);
+
+       /*
+        * This file should be checkpointed during fsync.
+        * We lose i_pino from now on.
+        */
        if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
+               file_lost_pino(inode);
                inc_nlink(inode);
-               update_inode_page(inode);
        }
-       return 0;
+       return page;
+
+error:
+       f2fs_put_page(page, 1);
+       remove_inode_page(inode);
+       return ERR_PTR(err);
 }
 
 static void update_parent_metadata(struct inode *dir, struct inode *inode,
                                                unsigned int current_depth)
 {
-       bool need_dir_update = false;
-
        if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
                if (S_ISDIR(inode->i_mode)) {
                        inc_nlink(dir);
-                       need_dir_update = true;
+                       set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
                }
                clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
        }
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        if (F2FS_I(dir)->i_current_depth != current_depth) {
                F2FS_I(dir)->i_current_depth = current_depth;
-               need_dir_update = true;
+               set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
        }
 
-       if (need_dir_update)
+       if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR))
                update_inode_page(dir);
        else
                mark_inode_dirty(dir);
@@ -423,6 +433,7 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in
        struct page *dentry_page = NULL;
        struct f2fs_dentry_block *dentry_blk = NULL;
        int slots = GET_DENTRY_SLOTS(namelen);
+       struct page *page;
        int err = 0;
        int i;
 
@@ -448,7 +459,7 @@ start:
        bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));
 
        for (block = bidx; block <= (bidx + nblock - 1); block++) {
-               dentry_page = get_new_data_page(dir, block, true);
+               dentry_page = get_new_data_page(dir, NULL, block, true);
                if (IS_ERR(dentry_page))
                        return PTR_ERR(dentry_page);
 
@@ -465,12 +476,13 @@ start:
        ++level;
        goto start;
 add_dentry:
-       err = init_inode_metadata(inode, dir, name);
-       if (err)
-               goto fail;
-
        wait_on_page_writeback(dentry_page);
 
+       page = init_inode_metadata(inode, dir, name);
+       if (IS_ERR(page)) {
+               err = PTR_ERR(page);
+               goto fail;
+       }
        de = &dentry_blk->dentry[bit_pos];
        de->hash_code = dentry_hash;
        de->name_len = cpu_to_le16(namelen);
@@ -481,11 +493,14 @@ add_dentry:
                test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
        set_page_dirty(dentry_page);
 
-       update_parent_metadata(dir, inode, current_depth);
-
-       /* update parent inode number before releasing dentry page */
+       /* we don't need to mark_inode_dirty now */
        F2FS_I(inode)->i_pino = dir->i_ino;
+       update_inode(inode, page);
+       f2fs_put_page(page, 1);
+
+       update_parent_metadata(dir, inode, current_depth);
 fail:
+       clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
        kunmap(dentry_page);
        f2fs_put_page(dentry_page, 1);
        return err;
@@ -591,24 +606,19 @@ bool f2fs_empty_dir(struct inode *dir)
        return true;
 }
 
-static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir)
+static int f2fs_readdir(struct file *file, struct dir_context *ctx)
 {
-       unsigned long pos = file->f_pos;
        struct inode *inode = file_inode(file);
        unsigned long npages = dir_blocks(inode);
-       unsigned char *types = NULL;
        unsigned int bit_pos = 0, start_bit_pos = 0;
-       int over = 0;
        struct f2fs_dentry_block *dentry_blk = NULL;
        struct f2fs_dir_entry *de = NULL;
        struct page *dentry_page = NULL;
-       unsigned int n = 0;
+       unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
        unsigned char d_type = DT_UNKNOWN;
        int slots;
 
-       types = f2fs_filetype_table;
-       bit_pos = (pos % NR_DENTRY_IN_BLOCK);
-       n = (pos / NR_DENTRY_IN_BLOCK);
+       bit_pos = ((unsigned long)ctx->pos % NR_DENTRY_IN_BLOCK);
 
        for ( ; n < npages; n++) {
                dentry_page = get_lock_data_page(inode, n);
@@ -618,31 +628,28 @@ static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir)
                start_bit_pos = bit_pos;
                dentry_blk = kmap(dentry_page);
                while (bit_pos < NR_DENTRY_IN_BLOCK) {
-                       d_type = DT_UNKNOWN;
                        bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
                                                        NR_DENTRY_IN_BLOCK,
                                                        bit_pos);
                        if (bit_pos >= NR_DENTRY_IN_BLOCK)
                                break;
 
+                       ctx->pos += bit_pos - start_bit_pos;
                        de = &dentry_blk->dentry[bit_pos];
-                       if (types && de->file_type < F2FS_FT_MAX)
-                               d_type = types[de->file_type];
-
-                       over = filldir(dirent,
-                                       dentry_blk->filename[bit_pos],
-                                       le16_to_cpu(de->name_len),
-                                       (n * NR_DENTRY_IN_BLOCK) + bit_pos,
-                                       le32_to_cpu(de->ino), d_type);
-                       if (over) {
-                               file->f_pos += bit_pos - start_bit_pos;
+                       if (de->file_type < F2FS_FT_MAX)
+                               d_type = f2fs_filetype_table[de->file_type];
+                       else
+                               d_type = DT_UNKNOWN;
+                       if (!dir_emit(ctx,
+                                     dentry_blk->filename[bit_pos],
+                                     le16_to_cpu(de->name_len),
+                                     le32_to_cpu(de->ino), d_type))
                                goto success;
-                       }
                        slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
                        bit_pos += slots;
                }
                bit_pos = 0;
-               file->f_pos = (n + 1) * NR_DENTRY_IN_BLOCK;
+               ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK;
                kunmap(dentry_page);
                f2fs_put_page(dentry_page, 1);
                dentry_page = NULL;
@@ -659,7 +666,7 @@ success:
 const struct file_operations f2fs_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = f2fs_readdir,
+       .iterate        = f2fs_readdir,
        .fsync          = f2fs_sync_file,
        .unlocked_ioctl = f2fs_ioctl,
 };
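
The hunk above converts f2fs from the old ->readdir/filldir callback to the new ->iterate/dir_context interface, where the filesystem emits entries through dir_emit() and keeps ctx->pos current so an interrupted listing can resume. A minimal, hedged skeleton of that contract for an imaginary one-entry directory (the entry name and inode number are made up):

static int sketch_iterate(struct file *file, struct dir_context *ctx)
{
	/* emits "." and ".." and advances ctx->pos to 2 */
	if (!dir_emit_dots(file, ctx))
		return 0;

	if (ctx->pos == 2) {
		/* stop quietly if the caller's buffer is full */
		if (!dir_emit(ctx, "example", 7, 42 /* ino */, DT_REG))
			return 0;
		ctx->pos++;
	}
	return 0;
}
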
index 20aab02f2a427181a4240ad99ad2b8200a235509..467d42d65c488a0ed3841b01e798841d0e48a395 100644 (file)
                typecheck(unsigned long long, b) &&                     \
                ((long long)((a) - (b)) > 0))
 
-typedef u64 block_t;
+typedef u32 block_t;   /*
+                        * should not be changed from u32, since it is the
+                        * on-disk block address format, __le32.
+                        */
 typedef u32 nid_t;
 
 struct f2fs_mount_info {
        unsigned int    opt;
 };
 
-static inline __u32 f2fs_crc32(void *buff, size_t len)
+#define CRCPOLY_LE 0xedb88320
+
+static inline __u32 f2fs_crc32(void *buf, size_t len)
 {
-       return crc32_le(F2FS_SUPER_MAGIC, buff, len);
+       unsigned char *p = (unsigned char *)buf;
+       __u32 crc = F2FS_SUPER_MAGIC;
+       int i;
+
+       while (len--) {
+               crc ^= *p++;
+               for (i = 0; i < 8; i++)
+                       crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
+       }
+       return crc;
 }
 
-static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size)
+static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
 {
-       return f2fs_crc32(buff, buff_size) == blk_crc;
+       return f2fs_crc32(buf, buf_size) == blk_crc;
 }
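
The open-coded loop above replaces the earlier crc32_le() call but computes the same little-endian CRC-32, seeded with F2FS_SUPER_MAGIC; the checkpoint code now stores the result as __le32 (cpu_to_le32() in do_checkpoint()) and converts it back with le32_to_cpu() when validating. A small stand-alone user-space mirror of the algorithm, for experimentation only (the 0xF2F52010 seed value is stated from memory, not from this hunk):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRCPOLY_LE 0xedb88320U

/* mirrors the f2fs_crc32() loop above, bit for bit */
static uint32_t mirror_f2fs_crc32(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	uint32_t crc = 0xF2F52010U;	/* F2FS_SUPER_MAGIC */
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
	}
	return crc;
}

int main(void)
{
	const char sample[] = "checkpoint";

	printf("crc = 0x%08x\n", mirror_f2fs_crc32(sample, strlen(sample)));
	return 0;
}
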
 
 /*
@@ -148,7 +162,7 @@ struct extent_info {
  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
  */
 #define FADVISE_COLD_BIT       0x01
-#define FADVISE_CP_BIT         0x02
+#define FADVISE_LOST_PINO_BIT  0x02
 
 struct f2fs_inode_info {
        struct inode vfs_inode;         /* serve a vfs inode */
@@ -369,7 +383,6 @@ struct f2fs_sb_info {
        /* for directory inode management */
        struct list_head dir_inode_list;        /* dir inode list */
        spinlock_t dir_inode_lock;              /* for dir inode list lock */
-       unsigned int n_dirty_dirs;              /* # of dir inodes */
 
        /* basic file system units */
        unsigned int log_sectors_per_block;     /* log2 sectors per block */
@@ -406,12 +419,15 @@ struct f2fs_sb_info {
         * for stat information.
         * one is for the LFS mode, and the other is for the SSR mode.
         */
+#ifdef CONFIG_F2FS_STAT_FS
        struct f2fs_stat_info *stat_info;       /* FS status information */
        unsigned int segment_count[2];          /* # of allocated segments */
        unsigned int block_count[2];            /* # of allocated blocks */
-       unsigned int last_victim[2];            /* last victim segment # */
        int total_hit_ext, read_hit_ext;        /* extent cache hit ratio */
        int bg_gc;                              /* background gc calls */
+       unsigned int n_dirty_dirs;              /* # of dir inodes */
+#endif
+       unsigned int last_victim[2];            /* last victim segment # */
        spinlock_t stat_lock;                   /* lock for stat operations */
 };
 
@@ -495,9 +511,17 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
 
 static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
 {
-       int i = 0;
-       for (; i < NR_GLOBAL_LOCKS; i++)
-               mutex_lock(&sbi->fs_lock[i]);
+       int i;
+
+       for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
+               /*
+                * This is the only time we take multiple fs_lock[]
+                * instances; the order is immaterial since we
+                * always hold cp_mutex, which serializes multiple
+                * such operations.
+                */
+               mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
+       }
 }
 
 static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
@@ -843,9 +867,12 @@ static inline int f2fs_clear_bit(unsigned int nr, char *addr)
 /* used for f2fs_inode_info->flags */
 enum {
        FI_NEW_INODE,           /* indicate newly allocated inode */
+       FI_DIRTY_INODE,         /* indicate inode is dirty or not */
        FI_INC_LINK,            /* need to increment i_nlink */
        FI_ACL_MODE,            /* indicate acl mode */
        FI_NO_ALLOC,            /* should not allocate any blocks */
+       FI_UPDATE_DIR,          /* should update inode block for consistency */
+       FI_DELAY_IPUT,          /* used for the recovery */
 };
 
 static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
@@ -878,14 +905,21 @@ static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
        return 0;
 }
 
+static inline int f2fs_readonly(struct super_block *sb)
+{
+       return sb->s_flags & MS_RDONLY;
+}
+
 /*
  * file.c
  */
 int f2fs_sync_file(struct file *, loff_t, loff_t, int);
 void truncate_data_blocks(struct dnode_of_data *);
 void f2fs_truncate(struct inode *);
+int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int f2fs_setattr(struct dentry *, struct iattr *);
 int truncate_hole(struct inode *, pgoff_t, pgoff_t);
+int truncate_data_blocks_range(struct dnode_of_data *, int);
 long f2fs_ioctl(struct file *, unsigned int, unsigned long);
 long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
 
@@ -913,7 +947,6 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
 ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
 void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
                                struct page *, struct inode *);
-void init_dent_inode(const struct qstr *, struct page *);
 int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
 void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
 int f2fs_make_empty(struct inode *, struct inode *);
@@ -948,8 +981,8 @@ void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
 int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
 int truncate_inode_blocks(struct inode *, pgoff_t);
 int remove_inode_page(struct inode *);
-int new_inode_page(struct inode *, const struct qstr *);
-struct page *new_node_page(struct dnode_of_data *, unsigned int);
+struct page *new_inode_page(struct inode *, const struct qstr *);
+struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
 void ra_node_page(struct f2fs_sb_info *, nid_t);
 struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
 struct page *get_node_page_ra(struct page *, int);
@@ -974,7 +1007,6 @@ void destroy_node_manager_caches(void);
  */
 void f2fs_balance_fs(struct f2fs_sb_info *);
 void invalidate_blocks(struct f2fs_sb_info *, block_t);
-void locate_dirty_segment(struct f2fs_sb_info *, unsigned int);
 void clear_prefree_segments(struct f2fs_sb_info *);
 int npages_for_summary_flush(struct f2fs_sb_info *);
 void allocate_new_segments(struct f2fs_sb_info *);
@@ -1011,7 +1043,9 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
 int recover_orphan_inodes(struct f2fs_sb_info *);
 int get_valid_checkpoint(struct f2fs_sb_info *);
 void set_dirty_dir_page(struct inode *, struct page *);
+void add_dirty_dir_inode(struct inode *);
 void remove_dirty_dir_inode(struct inode *);
+struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t);
 void sync_dirty_dir_inodes(struct f2fs_sb_info *);
 void write_checkpoint(struct f2fs_sb_info *, bool);
 void init_orphan_info(struct f2fs_sb_info *);
@@ -1025,7 +1059,7 @@ int reserve_new_block(struct dnode_of_data *);
 void update_extent_cache(block_t, struct dnode_of_data *);
 struct page *find_data_page(struct inode *, pgoff_t, bool);
 struct page *get_lock_data_page(struct inode *, pgoff_t);
-struct page *get_new_data_page(struct inode *, pgoff_t, bool);
+struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
 int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
 int do_write_data_page(struct page *);
 
index 1cae864f8dfcd03c676a8e02b1b2261a94af4f21..d2d2b7dbdcc12b348ca89899f500748716c2104c 100644 (file)
@@ -63,9 +63,10 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
        f2fs_put_dnode(&dn);
        mutex_unlock_op(sbi, ilock);
 
+       file_update_time(vma->vm_file);
        lock_page(page);
        if (page->mapping != inode->i_mapping ||
-                       page_offset(page) >= i_size_read(inode) ||
+                       page_offset(page) > i_size_read(inode) ||
                        !PageUptodate(page)) {
                unlock_page(page);
                err = -EFAULT;
@@ -76,10 +77,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
         * check to see if the page is mapped already (no holes)
         */
        if (PageMappedToDisk(page))
-               goto out;
-
-       /* fill the page */
-       wait_on_page_writeback(page);
+               goto mapped;
 
        /* page is wholly or partially inside EOF */
        if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
@@ -90,7 +88,9 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
        set_page_dirty(page);
        SetPageUptodate(page);
 
-       file_update_time(vma->vm_file);
+mapped:
+       /* fill the page */
+       wait_on_page_writeback(page);
 out:
        sb_end_pagefault(inode->i_sb);
        return block_page_mkwrite_return(err);
@@ -102,6 +102,24 @@ static const struct vm_operations_struct f2fs_file_vm_ops = {
        .remap_pages    = generic_file_remap_pages,
 };
 
+static int get_parent_ino(struct inode *inode, nid_t *pino)
+{
+       struct dentry *dentry;
+
+       inode = igrab(inode);
+       dentry = d_find_any_alias(inode);
+       iput(inode);
+       if (!dentry)
+               return 0;
+
+       inode = igrab(dentry->d_parent->d_inode);
+       dput(dentry);
+
+       *pino = inode->i_ino;
+       iput(inode);
+       return 1;
+}
+
 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct inode *inode = file->f_mapping->host;
@@ -114,7 +132,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                .for_reclaim = 0,
        };
 
-       if (inode->i_sb->s_flags & MS_RDONLY)
+       if (f2fs_readonly(inode->i_sb))
                return 0;
 
        trace_f2fs_sync_file_enter(inode);
@@ -134,7 +152,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
                need_cp = true;
-       else if (is_cp_file(inode))
+       else if (file_wrong_pino(inode))
                need_cp = true;
        else if (!space_for_roll_forward(sbi))
                need_cp = true;
@@ -142,11 +160,23 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                need_cp = true;
 
        if (need_cp) {
+               nid_t pino;
+
                /* all the dirty node pages should be flushed for POR */
                ret = f2fs_sync_fs(inode->i_sb, 1);
+               if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
+                                       get_parent_ino(inode, &pino)) {
+                       F2FS_I(inode)->i_pino = pino;
+                       file_got_pino(inode);
+                       mark_inode_dirty_sync(inode);
+                       ret = f2fs_write_inode(inode, NULL);
+                       if (ret)
+                               goto out;
+               }
        } else {
                /* if there is no written node page, write its inode page */
                while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
+                       mark_inode_dirty_sync(inode);
                        ret = f2fs_write_inode(inode, NULL);
                        if (ret)
                                goto out;
@@ -168,7 +198,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
        return 0;
 }
 
-static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 {
        int nr_free = 0, ofs = dn->ofs_in_node;
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
@@ -185,10 +215,10 @@ static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 
                update_extent_cache(NULL_ADDR, dn);
                invalidate_blocks(sbi, blkaddr);
-               dec_valid_block_count(sbi, dn->inode, 1);
                nr_free++;
        }
        if (nr_free) {
+               dec_valid_block_count(sbi, dn->inode, nr_free);
                set_page_dirty(dn->node_page);
                sync_inode_page(dn);
        }
@@ -291,7 +321,7 @@ void f2fs_truncate(struct inode *inode)
        }
 }
 
-static int f2fs_getattr(struct vfsmount *mnt,
+int f2fs_getattr(struct vfsmount *mnt,
                         struct dentry *dentry, struct kstat *stat)
 {
        struct inode *inode = dentry->d_inode;
@@ -387,7 +417,7 @@ static void fill_zero(struct inode *inode, pgoff_t index,
        f2fs_balance_fs(sbi);
 
        ilock = mutex_lock_op(sbi);
-       page = get_new_data_page(inode, index, false);
+       page = get_new_data_page(inode, NULL, index, false);
        mutex_unlock_op(sbi, ilock);
 
        if (!IS_ERR(page)) {
@@ -575,10 +605,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        int ret;
 
        switch (cmd) {
-       case FS_IOC_GETFLAGS:
+       case F2FS_IOC_GETFLAGS:
                flags = fi->i_flags & FS_FL_USER_VISIBLE;
                return put_user(flags, (int __user *) arg);
-       case FS_IOC_SETFLAGS:
+       case F2FS_IOC_SETFLAGS:
        {
                unsigned int oldflags;
 
index 14961593e93c84f47860e0a33df001f59a8d9508..35f9b1a196aa15bd4208a33dfca2b6d85bd63150 100644 (file)
@@ -76,7 +76,9 @@ static int gc_thread_func(void *data)
                else
                        wait_ms = increase_sleep_time(wait_ms);
 
+#ifdef CONFIG_F2FS_STAT_FS
                sbi->bg_gc++;
+#endif
 
                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi))
@@ -89,23 +91,28 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
 {
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
+       int err = 0;
 
        if (!test_opt(sbi, BG_GC))
-               return 0;
+               goto out;
        gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
-       if (!gc_th)
-               return -ENOMEM;
+       if (!gc_th) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
+               err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
-               return -ENOMEM;
        }
-       return 0;
+
+out:
+       return err;
 }
 
 void stop_gc_thread(struct f2fs_sb_info *sbi)
@@ -234,14 +241,14 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct victim_sel_policy p;
-       unsigned int secno;
+       unsigned int secno, max_cost;
        int nsearched = 0;
 
        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);
 
        p.min_segno = NULL_SEGNO;
-       p.min_cost = get_max_cost(sbi, &p);
+       p.min_cost = max_cost = get_max_cost(sbi, &p);
 
        mutex_lock(&dirty_i->seglist_lock);
 
@@ -280,7 +287,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                        p.min_cost = cost;
                }
 
-               if (cost == get_max_cost(sbi, &p))
+               if (cost == max_cost)
                        continue;
 
                if (nsearched++ >= MAX_VICTIM_SEARCH) {
@@ -288,8 +295,8 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                        break;
                }
        }
-got_it:
        if (p.min_segno != NULL_SEGNO) {
+got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SECNO(sbi, p.min_segno);
                        if (gc_type == FG_GC)
@@ -314,28 +321,21 @@ static const struct victim_selection default_v_ops = {
 
 static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
 {
-       struct list_head *this;
        struct inode_entry *ie;
 
-       list_for_each(this, ilist) {
-               ie = list_entry(this, struct inode_entry, list);
+       list_for_each_entry(ie, ilist, list)
                if (ie->inode->i_ino == ino)
                        return ie->inode;
-       }
        return NULL;
 }
 
 static void add_gc_inode(struct inode *inode, struct list_head *ilist)
 {
-       struct list_head *this;
-       struct inode_entry *new_ie, *ie;
+       struct inode_entry *new_ie;
 
-       list_for_each(this, ilist) {
-               ie = list_entry(this, struct inode_entry, list);
-               if (ie->inode == inode) {
-                       iput(inode);
-                       return;
-               }
+       if (inode == find_gc_inode(inode->i_ino, ilist)) {
+               iput(inode);
+               return;
        }
 repeat:
        new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
index 91ac7f9d88eeaf84a86f38bb07866095910d8620..2b2d45d19e3ea1e1101ff893a2ce9e0f19e607ea 100644 (file)
@@ -109,12 +109,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
        ret = do_read_inode(inode);
        if (ret)
                goto bad_inode;
-
-       if (!sbi->por_doing && inode->i_nlink == 0) {
-               ret = -ENOENT;
-               goto bad_inode;
-       }
-
 make_now:
        if (ino == F2FS_NODE_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_node_aops;
@@ -130,8 +124,7 @@ make_now:
                inode->i_op = &f2fs_dir_inode_operations;
                inode->i_fop = &f2fs_dir_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
-               mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE |
-                               __GFP_ZERO);
+               mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &f2fs_symlink_inode_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
@@ -199,6 +192,7 @@ void update_inode(struct inode *inode, struct page *node_page)
 
        set_cold_node(inode, node_page);
        set_page_dirty(node_page);
+       clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
 }
 
 int update_inode_page(struct inode *inode)
@@ -224,6 +218,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
                        inode->i_ino == F2FS_META_INO(sbi))
                return 0;
 
+       if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
+               return 0;
+
        if (wbc)
                f2fs_balance_fs(sbi);
 
index 47abc9722b17abfae9656b3d5d52360fc8e72ad8..64c07169df050b4b358186540031365bd67f4b56 100644 (file)
@@ -112,7 +112,7 @@ static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode,
        int count = le32_to_cpu(sbi->raw_super->extension_count);
        for (i = 0; i < count; i++) {
                if (is_multimedia_file(name, extlist[i])) {
-                       set_cold_file(inode);
+                       file_set_cold(inode);
                        break;
                }
        }
@@ -149,8 +149,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 
        alloc_nid_done(sbi, ino);
 
-       if (!sbi->por_doing)
-               d_instantiate(dentry, inode);
+       d_instantiate(dentry, inode);
        unlock_new_inode(inode);
        return 0;
 out:
@@ -173,7 +172,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
        f2fs_balance_fs(sbi);
 
        inode->i_ctime = CURRENT_TIME;
-       atomic_inc(&inode->i_count);
+       ihold(inode);
 
        set_inode_flag(F2FS_I(inode), FI_INC_LINK);
        ilock = mutex_lock_op(sbi);
@@ -182,17 +181,10 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
        if (err)
                goto out;
 
-       /*
-        * This file should be checkpointed during fsync.
-        * We lost i_pino from now on.
-        */
-       set_cp_file(inode);
-
        d_instantiate(dentry, inode);
        return 0;
 out:
        clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
-       make_bad_inode(inode);
        iput(inode);
        return err;
 }
@@ -498,6 +490,7 @@ const struct inode_operations f2fs_dir_inode_operations = {
        .rmdir          = f2fs_rmdir,
        .mknod          = f2fs_mknod,
        .rename         = f2fs_rename,
+       .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
 #ifdef CONFIG_F2FS_FS_XATTR
@@ -512,6 +505,7 @@ const struct inode_operations f2fs_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
+       .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
 #ifdef CONFIG_F2FS_FS_XATTR
        .setxattr       = generic_setxattr,
@@ -522,6 +516,7 @@ const struct inode_operations f2fs_symlink_inode_operations = {
 };
 
 const struct inode_operations f2fs_special_inode_operations = {
+       .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
 #ifdef CONFIG_F2FS_FS_XATTR
index 3df43b4efd89e96e971263e61369dee5d3e39b87..b418aee09573f6d4aff7c4f821ee4faf86d98b0b 100644 (file)
@@ -408,10 +408,13 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
        level = get_node_path(index, offset, noffset);
 
        nids[0] = dn->inode->i_ino;
-       npage[0] = get_node_page(sbi, nids[0]);
-       if (IS_ERR(npage[0]))
-               return PTR_ERR(npage[0]);
+       npage[0] = dn->inode_page;
 
+       if (!npage[0]) {
+               npage[0] = get_node_page(sbi, nids[0]);
+               if (IS_ERR(npage[0]))
+                       return PTR_ERR(npage[0]);
+       }
        parent = npage[0];
        if (level != 0)
                nids[1] = get_nid(parent, offset[0], true);
@@ -430,7 +433,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
                        }
 
                        dn->nid = nids[i];
-                       npage[i] = new_node_page(dn, noffset[i]);
+                       npage[i] = new_node_page(dn, noffset[i], NULL);
                        if (IS_ERR(npage[i])) {
                                alloc_nid_failed(sbi, nids[i]);
                                err = PTR_ERR(npage[i]);
@@ -803,22 +806,19 @@ int remove_inode_page(struct inode *inode)
        return 0;
 }
 
-int new_inode_page(struct inode *inode, const struct qstr *name)
+struct page *new_inode_page(struct inode *inode, const struct qstr *name)
 {
-       struct page *page;
        struct dnode_of_data dn;
 
        /* allocate inode page for new inode */
        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
-       page = new_node_page(&dn, 0);
-       init_dent_inode(name, page);
-       if (IS_ERR(page))
-               return PTR_ERR(page);
-       f2fs_put_page(page, 1);
-       return 0;
+
+       /* caller should f2fs_put_page(page, 1); */
+       return new_node_page(&dn, 0, NULL);
 }
 
-struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+struct page *new_node_page(struct dnode_of_data *dn,
+                               unsigned int ofs, struct page *ipage)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct address_space *mapping = sbi->node_inode->i_mapping;
@@ -851,7 +851,10 @@ struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
        set_cold_node(dn->inode, page);
 
        dn->node_page = page;
-       sync_inode_page(dn);
+       if (ipage)
+               update_inode(dn->inode, ipage);
+       else
+               sync_inode_page(dn);
        set_page_dirty(page);
        if (ofs == 0)
                inc_valid_inode_count(sbi);
@@ -1205,7 +1208,8 @@ static int f2fs_set_node_page_dirty(struct page *page)
        return 0;
 }
 
-static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
+static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
+                                     unsigned int length)
 {
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -1492,9 +1496,10 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
        new_ni = old_ni;
        new_ni.ino = ino;
 
+       if (!inc_valid_node_count(sbi, NULL, 1))
+               WARN_ON(1);
        set_node_addr(sbi, &new_ni, NEW_ADDR);
        inc_valid_inode_count(sbi);
-
        f2fs_put_page(ipage, 1);
        return 0;
 }
index 0a2d72f0024ddf88c4df50c46d0eda59a00bbb62..c65fb4f4230f699ba7647c820b328942b43b8490 100644 (file)
@@ -275,25 +275,27 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
  *  - Mark cold node blocks in their node footer
  *  - Mark cold data pages in page cache
  */
-static inline int is_cold_file(struct inode *inode)
+static inline int is_file(struct inode *inode, int type)
 {
-       return F2FS_I(inode)->i_advise & FADVISE_COLD_BIT;
+       return F2FS_I(inode)->i_advise & type;
 }
 
-static inline void set_cold_file(struct inode *inode)
+static inline void set_file(struct inode *inode, int type)
 {
-       F2FS_I(inode)->i_advise |= FADVISE_COLD_BIT;
+       F2FS_I(inode)->i_advise |= type;
 }
 
-static inline int is_cp_file(struct inode *inode)
+static inline void clear_file(struct inode *inode, int type)
 {
-       return F2FS_I(inode)->i_advise & FADVISE_CP_BIT;
+       F2FS_I(inode)->i_advise &= ~type;
 }
 
-static inline void set_cp_file(struct inode *inode)
-{
-       F2FS_I(inode)->i_advise |= FADVISE_CP_BIT;
-}
+#define file_is_cold(inode)    is_file(inode, FADVISE_COLD_BIT)
+#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
+#define file_set_cold(inode)   set_file(inode, FADVISE_COLD_BIT)
+#define file_lost_pino(inode)  set_file(inode, FADVISE_LOST_PINO_BIT)
+#define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT)
+#define file_got_pino(inode)   clear_file(inode, FADVISE_LOST_PINO_BIT)
 
 static inline int is_cold_data(struct page *page)
 {
@@ -310,29 +312,16 @@ static inline void clear_cold_data(struct page *page)
        ClearPageChecked(page);
 }
 
-static inline int is_cold_node(struct page *page)
+static inline int is_node(struct page *page, int type)
 {
        void *kaddr = page_address(page);
        struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       unsigned int flag = le32_to_cpu(rn->footer.flag);
-       return flag & (0x1 << COLD_BIT_SHIFT);
+       return le32_to_cpu(rn->footer.flag) & (1 << type);
 }
 
-static inline unsigned char is_fsync_dnode(struct page *page)
-{
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       unsigned int flag = le32_to_cpu(rn->footer.flag);
-       return flag & (0x1 << FSYNC_BIT_SHIFT);
-}
-
-static inline unsigned char is_dent_dnode(struct page *page)
-{
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       unsigned int flag = le32_to_cpu(rn->footer.flag);
-       return flag & (0x1 << DENT_BIT_SHIFT);
-}
+#define is_cold_node(page)     is_node(page, COLD_BIT_SHIFT)
+#define is_fsync_dnode(page)   is_node(page, FSYNC_BIT_SHIFT)
+#define is_dent_dnode(page)    is_node(page, DENT_BIT_SHIFT)
 
 static inline void set_cold_node(struct inode *inode, struct page *page)
 {
@@ -346,26 +335,15 @@ static inline void set_cold_node(struct inode *inode, struct page *page)
        rn->footer.flag = cpu_to_le32(flag);
 }
 
-static inline void set_fsync_mark(struct page *page, int mark)
+static inline void set_mark(struct page *page, int mark, int type)
 {
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       unsigned int flag = le32_to_cpu(rn->footer.flag);
-       if (mark)
-               flag |= (0x1 << FSYNC_BIT_SHIFT);
-       else
-               flag &= ~(0x1 << FSYNC_BIT_SHIFT);
-       rn->footer.flag = cpu_to_le32(flag);
-}
-
-static inline void set_dentry_mark(struct page *page, int mark)
-{
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = (struct f2fs_node *)page_address(page);
        unsigned int flag = le32_to_cpu(rn->footer.flag);
        if (mark)
-               flag |= (0x1 << DENT_BIT_SHIFT);
+               flag |= (0x1 << type);
        else
-               flag &= ~(0x1 << DENT_BIT_SHIFT);
+               flag &= ~(0x1 << type);
        rn->footer.flag = cpu_to_le32(flag);
 }
+#define set_dentry_mark(page, mark)    set_mark(page, mark, DENT_BIT_SHIFT)
+#define set_fsync_mark(page, mark)     set_mark(page, mark, FSYNC_BIT_SHIFT)
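
The node.h hunks above replace one helper function per i_advise bit with a generic is_file()/set_file()/clear_file() trio plus thin macros (and do the same for the node-footer flags via is_node()/set_mark()). The following compiles standalone as a sketch of the pattern; the struct and bit values are illustrative, not the kernel definitions.

#include <stdio.h>

#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02

struct toy_inode { unsigned char i_advise; };

static inline int is_file(struct toy_inode *inode, int type)
{
	return inode->i_advise & type;
}
static inline void set_file(struct toy_inode *inode, int type)
{
	inode->i_advise |= type;
}
static inline void clear_file(struct toy_inode *inode, int type)
{
	inode->i_advise &= ~type;
}

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)

int main(void)
{
	struct toy_inode inode = { 0 };

	file_set_cold(&inode);
	file_lost_pino(&inode);
	file_got_pino(&inode);
	printf("cold=%d wrong_pino=%d\n",
	       !!file_is_cold(&inode), !!file_wrong_pino(&inode)); /* 1 0 */
	return 0;
}
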
index 60c8a5097058f02e02c28979dad759a7d734438a..d56d951c22537a14a9f103c679e93e64321d863f 100644 (file)
@@ -40,36 +40,54 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
 
 static int recover_dentry(struct page *ipage, struct inode *inode)
 {
-       struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
+       void *kaddr = page_address(ipage);
+       struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
        struct f2fs_inode *raw_inode = &(raw_node->i);
-       struct qstr name;
+       nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct f2fs_dir_entry *de;
+       struct qstr name;
        struct page *page;
-       struct inode *dir;
+       struct inode *dir, *einode;
        int err = 0;
 
-       if (!is_dent_dnode(ipage))
-               goto out;
-
-       dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
-       if (IS_ERR(dir)) {
-               err = PTR_ERR(dir);
-               goto out;
+       dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
+       if (!dir) {
+               dir = f2fs_iget(inode->i_sb, pino);
+               if (IS_ERR(dir)) {
+                       err = PTR_ERR(dir);
+                       goto out;
+               }
+               set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
+               add_dirty_dir_inode(dir);
        }
 
        name.len = le32_to_cpu(raw_inode->i_namelen);
        name.name = raw_inode->i_name;
-
+retry:
        de = f2fs_find_entry(dir, &name, &page);
-       if (de) {
+       if (de && inode->i_ino == le32_to_cpu(de->ino)) {
                kunmap(page);
                f2fs_put_page(page, 0);
-       } else {
-               err = __f2fs_add_link(dir, &name, inode);
+               goto out;
+       }
+       if (de) {
+               einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
+               if (IS_ERR(einode)) {
+                       WARN_ON(1);
+                       if (PTR_ERR(einode) == -ENOENT)
+                               err = -EEXIST;
+                       goto out;
+               }
+               f2fs_delete_entry(de, page, einode);
+               iput(einode);
+               goto retry;
        }
-       iput(dir);
+       err = __f2fs_add_link(dir, &name, inode);
 out:
-       kunmap(ipage);
+       f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
+                       "ino = %x, name = %s, dir = %lx, err = %d",
+                       ino_of_node(ipage), raw_inode->i_name,
+                       IS_ERR(dir) ? 0 : dir->i_ino, err);
        return err;
 }
 
@@ -79,6 +97,9 @@ static int recover_inode(struct inode *inode, struct page *node_page)
        struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
        struct f2fs_inode *raw_inode = &(raw_node->i);
 
+       if (!IS_INODE(node_page))
+               return 0;
+
        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_size_write(inode, le64_to_cpu(raw_inode->i_size));
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
@@ -88,7 +109,12 @@ static int recover_inode(struct inode *inode, struct page *node_page)
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 
-       return recover_dentry(node_page, inode);
+       if (is_dent_dnode(node_page))
+               return recover_dentry(node_page, inode);
+
+       f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
+                       ino_of_node(node_page), raw_inode->i_name);
+       return 0;
 }
 
 static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
@@ -119,14 +145,13 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
                lock_page(page);
 
                if (cp_ver != cpver_of_node(page))
-                       goto unlock_out;
+                       break;
 
                if (!is_fsync_dnode(page))
                        goto next;
 
                entry = get_fsync_inode(head, ino_of_node(page));
                if (entry) {
-                       entry->blkaddr = blkaddr;
                        if (IS_INODE(page) && is_dent_dnode(page))
                                set_inode_flag(F2FS_I(entry->inode),
                                                        FI_INC_LINK);
@@ -134,48 +159,40 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
                        if (IS_INODE(page) && is_dent_dnode(page)) {
                                err = recover_inode_page(sbi, page);
                                if (err)
-                                       goto unlock_out;
+                                       break;
                        }
 
                        /* add this fsync inode to the list */
                        entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
                        if (!entry) {
                                err = -ENOMEM;
-                               goto unlock_out;
+                               break;
                        }
 
                        entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
                        if (IS_ERR(entry->inode)) {
                                err = PTR_ERR(entry->inode);
                                kmem_cache_free(fsync_entry_slab, entry);
-                               goto unlock_out;
+                               break;
                        }
-
                        list_add_tail(&entry->list, head);
-                       entry->blkaddr = blkaddr;
-               }
-               if (IS_INODE(page)) {
-                       err = recover_inode(entry->inode, page);
-                       if (err == -ENOENT) {
-                               goto next;
-                       } else if (err) {
-                               err = -EINVAL;
-                               goto unlock_out;
-                       }
                }
+               entry->blkaddr = blkaddr;
+
+               err = recover_inode(entry->inode, page);
+               if (err && err != -ENOENT)
+                       break;
 next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
        }
-unlock_out:
        unlock_page(page);
 out:
        __free_pages(page, 0);
        return err;
 }
 
-static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
-                                       struct list_head *head)
+static void destroy_fsync_dnodes(struct list_head *head)
 {
        struct fsync_inode_entry *entry, *tmp;
 
@@ -186,15 +203,15 @@ static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
        }
 }
 
-static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
-                                               block_t blkaddr)
+static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+                       block_t blkaddr, struct dnode_of_data *dn)
 {
        struct seg_entry *sentry;
        unsigned int segno = GET_SEGNO(sbi, blkaddr);
        unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
                                        (sbi->blocks_per_seg - 1);
        struct f2fs_summary sum;
-       nid_t ino;
+       nid_t ino, nid;
        void *kaddr;
        struct inode *inode;
        struct page *node_page;
@@ -203,7 +220,7 @@ static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
 
        sentry = get_seg_entry(sbi, segno);
        if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
-               return;
+               return 0;
 
        /* Get the previous summary */
        for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
@@ -222,20 +239,39 @@ static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
                f2fs_put_page(sum_page, 1);
        }
 
+       /* Use the locked dnode page and inode */
+       nid = le32_to_cpu(sum.nid);
+       if (dn->inode->i_ino == nid) {
+               struct dnode_of_data tdn = *dn;
+               tdn.nid = nid;
+               tdn.node_page = dn->inode_page;
+               tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+               truncate_data_blocks_range(&tdn, 1);
+               return 0;
+       } else if (dn->nid == nid) {
+               struct dnode_of_data tdn = *dn;
+               tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+               truncate_data_blocks_range(&tdn, 1);
+               return 0;
+       }
+
        /* Get the node page */
-       node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
+       node_page = get_node_page(sbi, nid);
+       if (IS_ERR(node_page))
+               return PTR_ERR(node_page);
        bidx = start_bidx_of_node(ofs_of_node(node_page)) +
-                               le16_to_cpu(sum.ofs_in_node);
+                                       le16_to_cpu(sum.ofs_in_node);
        ino = ino_of_node(node_page);
        f2fs_put_page(node_page, 1);
 
        /* Deallocate previous index in the node page */
        inode = f2fs_iget(sbi->sb, ino);
        if (IS_ERR(inode))
-               return;
+               return PTR_ERR(inode);
 
        truncate_hole(inode, bidx, bidx + 1);
        iput(inode);
+       return 0;
 }
 
 static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
@@ -245,7 +281,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
-       int err = 0;
+       int err = 0, recovered = 0;
        int ilock;
 
        start = start_bidx_of_node(ofs_of_node(page));
@@ -283,13 +319,16 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                        }
 
                        /* Check the previous node page having this index */
-                       check_index_in_prev_nodes(sbi, dest);
+                       err = check_index_in_prev_nodes(sbi, dest, &dn);
+                       if (err)
+                               goto err;
 
                        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
 
                        /* write dummy data page */
                        recover_data_page(sbi, NULL, &sum, src, dest);
                        update_extent_cache(dest, &dn);
+                       recovered++;
                }
                dn.ofs_in_node++;
        }
@@ -305,9 +344,14 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        set_page_dirty(dn.node_page);
 
        recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
+err:
        f2fs_put_dnode(&dn);
        mutex_unlock_op(sbi, ilock);
-       return 0;
+
+       f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
+                       "recovered_data = %d blocks, err = %d",
+                       inode->i_ino, recovered, err);
+       return err;
 }
 
 static int recover_data(struct f2fs_sb_info *sbi,
@@ -340,7 +384,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
                lock_page(page);
 
                if (cp_ver != cpver_of_node(page))
-                       goto unlock_out;
+                       break;
 
                entry = get_fsync_inode(head, ino_of_node(page));
                if (!entry)
@@ -348,7 +392,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
 
                err = do_recover_data(sbi, entry->inode, page, blkaddr);
                if (err)
-                       goto out;
+                       break;
 
                if (entry->blkaddr == blkaddr) {
                        iput(entry->inode);
@@ -359,7 +403,6 @@ next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
        }
-unlock_out:
        unlock_page(page);
 out:
        __free_pages(page, 0);
@@ -382,6 +425,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
        INIT_LIST_HEAD(&inode_list);
 
        /* step #1: find fsynced inode numbers */
+       sbi->por_doing = 1;
        err = find_fsync_dnodes(sbi, &inode_list);
        if (err)
                goto out;
@@ -390,13 +434,13 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
                goto out;
 
        /* step #2: recover data */
-       sbi->por_doing = 1;
        err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
-       sbi->por_doing = 0;
        BUG_ON(!list_empty(&inode_list));
 out:
-       destroy_fsync_dnodes(sbi, &inode_list);
+       destroy_fsync_dnodes(&inode_list);
        kmem_cache_destroy(fsync_entry_slab);
-       write_checkpoint(sbi, false);
+       sbi->por_doing = 0;
+       if (!err)
+               write_checkpoint(sbi, false);
        return err;
 }
index d8e84e49a5c301cb139acfa6d0e06cbd1544b82a..a86d125a9885e274b11a421d4b117480a2744994 100644 (file)
@@ -94,7 +94,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
  * Adding dirty entry into seglist is not critical operation.
  * If a given segment is one of current working segments, it won't be added.
  */
-void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
+static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned short valid_blocks;
@@ -126,17 +126,16 @@ void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       unsigned int segno, offset = 0;
+       unsigned int segno = -1;
        unsigned int total_segs = TOTAL_SEGS(sbi);
 
        mutex_lock(&dirty_i->seglist_lock);
        while (1) {
                segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
-                               offset);
+                               segno + 1);
                if (segno >= total_segs)
                        break;
                __set_test_and_free(sbi, segno);
-               offset = segno + 1;
        }
        mutex_unlock(&dirty_i->seglist_lock);
 }
@@ -144,17 +143,16 @@ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
 void clear_prefree_segments(struct f2fs_sb_info *sbi)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       unsigned int segno, offset = 0;
+       unsigned int segno = -1;
        unsigned int total_segs = TOTAL_SEGS(sbi);
 
        mutex_lock(&dirty_i->seglist_lock);
        while (1) {
                segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
-                               offset);
+                               segno + 1);
                if (segno >= total_segs)
                        break;
 
-               offset = segno + 1;
                if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
                        dirty_i->nr_dirty[PRE]--;
 
@@ -257,11 +255,11 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
  * This function should be resided under the curseg_mutex lock
  */
 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
-               struct f2fs_summary *sum, unsigned short offset)
+                                       struct f2fs_summary *sum)
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        void *addr = curseg->sum_blk;
-       addr += offset * sizeof(struct f2fs_summary);
+       addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
        memcpy(addr, sum, sizeof(struct f2fs_summary));
        return;
 }
@@ -311,64 +309,14 @@ static void write_sum_page(struct f2fs_sb_info *sbi,
        f2fs_put_page(page, 1);
 }
 
-static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
-{
-       struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
-       unsigned int segno;
-       unsigned int ofs = 0;
-
-       /*
-        * If there is not enough reserved sections,
-        * we should not reuse prefree segments.
-        */
-       if (has_not_enough_free_secs(sbi, 0))
-               return NULL_SEGNO;
-
-       /*
-        * NODE page should not reuse prefree segment,
-        * since those information is used for SPOR.
-        */
-       if (IS_NODESEG(type))
-               return NULL_SEGNO;
-next:
-       segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
-       ofs += sbi->segs_per_sec;
-
-       if (segno < TOTAL_SEGS(sbi)) {
-               int i;
-
-               /* skip intermediate segments in a section */
-               if (segno % sbi->segs_per_sec)
-                       goto next;
-
-               /* skip if the section is currently used */
-               if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
-                       goto next;
-
-               /* skip if whole section is not prefree */
-               for (i = 1; i < sbi->segs_per_sec; i++)
-                       if (!test_bit(segno + i, prefree_segmap))
-                               goto next;
-
-               /* skip if whole section was not free at the last checkpoint */
-               for (i = 0; i < sbi->segs_per_sec; i++)
-                       if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
-                               goto next;
-
-               return segno;
-       }
-       return NULL_SEGNO;
-}
-
 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
-       unsigned int segno = curseg->segno;
+       unsigned int segno = curseg->segno + 1;
        struct free_segmap_info *free_i = FREE_I(sbi);
 
-       if (segno + 1 < TOTAL_SEGS(sbi) && (segno + 1) % sbi->segs_per_sec)
-               return !test_bit(segno + 1, free_i->free_segmap);
+       if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
+               return !test_bit(segno, free_i->free_segmap);
        return 0;
 }
 
@@ -495,7 +443,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
        int dir = ALLOC_LEFT;
 
        write_sum_page(sbi, curseg->sum_blk,
-                               GET_SUM_BLOCK(sbi, curseg->segno));
+                               GET_SUM_BLOCK(sbi, segno));
        if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
                dir = ALLOC_RIGHT;
 
@@ -599,11 +547,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
                goto out;
        }
 
-       curseg->next_segno = check_prefree_segments(sbi, type);
-
-       if (curseg->next_segno != NULL_SEGNO)
-               change_curseg(sbi, type, false);
-       else if (type == CURSEG_WARM_NODE)
+       if (type == CURSEG_WARM_NODE)
                new_curseg(sbi, type, false);
        else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
                new_curseg(sbi, type, false);
@@ -612,7 +556,10 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
        else
                new_curseg(sbi, type, false);
 out:
+#ifdef CONFIG_F2FS_STAT_FS
        sbi->segment_count[curseg->alloc_type]++;
+#endif
+       return;
 }
 
 void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -795,7 +742,7 @@ static int __get_segment_type_6(struct page *page, enum page_type p_type)
 
                if (S_ISDIR(inode->i_mode))
                        return CURSEG_HOT_DATA;
-               else if (is_cold_data(page) || is_cold_file(inode))
+               else if (is_cold_data(page) || file_is_cold(inode))
                        return CURSEG_COLD_DATA;
                else
                        return CURSEG_WARM_DATA;
@@ -844,11 +791,13 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
         * because, this function updates a summary entry in the
         * current summary block.
         */
-       __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
+       __add_sum_entry(sbi, type, sum);
 
        mutex_lock(&sit_i->sentry_lock);
        __refresh_next_blkoff(sbi, curseg);
+#ifdef CONFIG_F2FS_STAT_FS
        sbi->block_count[curseg->alloc_type]++;
+#endif
 
        /*
         * SIT information should be updated before segment allocation,
@@ -943,7 +892,7 @@ void recover_data_page(struct f2fs_sb_info *sbi,
 
        curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
                                        (sbi->blocks_per_seg - 1);
-       __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
+       __add_sum_entry(sbi, type, sum);
 
        refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
 
@@ -980,7 +929,7 @@ void rewrite_node_page(struct f2fs_sb_info *sbi,
        }
        curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
                                        (sbi->blocks_per_seg - 1);
-       __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
+       __add_sum_entry(sbi, type, sum);
 
        /* change the current log to the next block addr in advance */
        if (next_segno != segno) {
@@ -1579,13 +1528,13 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct free_segmap_info *free_i = FREE_I(sbi);
-       unsigned int segno = 0, offset = 0;
+       unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
        unsigned short valid_blocks;
 
-       while (segno < TOTAL_SEGS(sbi)) {
+       while (1) {
                /* find dirty segment based on free segmap */
-               segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset);
-               if (segno >= TOTAL_SEGS(sbi))
+               segno = find_next_inuse(free_i, total_segs, offset);
+               if (segno >= total_segs)
                        break;
                offset = segno + 1;
                valid_blocks = get_valid_blocks(sbi, segno, 0);
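
Several segment.c loops above drop the separate `offset` variable by initializing `segno = -1` and passing `segno + 1` to find_next_bit(), relying on unsigned wrap-around for the first iteration. A small userspace sketch of the idiom, using a trivial stand-in for find_next_bit():

#include <stdio.h>

#define TOTAL_SEGS 16

/* stand-in: return the first set bit index >= start, or size if none */
static unsigned int find_next_bit_stub(const unsigned long *map,
				       unsigned int size, unsigned int start)
{
	for (; start < size; start++)
		if (*map & (1UL << start))
			return start;
	return size;
}

int main(void)
{
	unsigned long prefree_map = (1UL << 2) | (1UL << 5) | (1UL << 9);
	unsigned int segno = -1;   /* wraps so segno + 1 == 0 on first pass */

	while (1) {
		segno = find_next_bit_stub(&prefree_map, TOTAL_SEGS, segno + 1);
		if (segno >= TOTAL_SEGS)
			break;
		printf("prefree segment %u\n", segno);
	}
	return 0;
}
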
index 8555f7df82c796720c2c85ce0bd0af6248efa1c2..75c7dc363e9267378d5cdf39f811c16a7adc8692 100644 (file)
@@ -34,7 +34,7 @@
 static struct kmem_cache *f2fs_inode_cachep;
 
 enum {
-       Opt_gc_background_off,
+       Opt_gc_background,
        Opt_disable_roll_forward,
        Opt_discard,
        Opt_noheap,
@@ -46,7 +46,7 @@ enum {
 };
 
 static match_table_t f2fs_tokens = {
-       {Opt_gc_background_off, "background_gc_off"},
+       {Opt_gc_background, "background_gc=%s"},
        {Opt_disable_roll_forward, "disable_roll_forward"},
        {Opt_discard, "discard"},
        {Opt_noheap, "no_heap"},
@@ -76,6 +76,91 @@ static void init_once(void *foo)
        inode_init_once(&fi->vfs_inode);
 }
 
+static int parse_options(struct super_block *sb, char *options)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       substring_t args[MAX_OPT_ARGS];
+       char *p, *name;
+       int arg = 0;
+
+       if (!options)
+               return 0;
+
+       while ((p = strsep(&options, ",")) != NULL) {
+               int token;
+               if (!*p)
+                       continue;
+               /*
+                * Initialize args struct so we know whether arg was
+                * found; some options take optional arguments.
+                */
+               args[0].to = args[0].from = NULL;
+               token = match_token(p, f2fs_tokens, args);
+
+               switch (token) {
+               case Opt_gc_background:
+                       name = match_strdup(&args[0]);
+
+                       if (!name)
+                               return -ENOMEM;
+                       if (!strncmp(name, "on", 2))
+                               set_opt(sbi, BG_GC);
+                       else if (!strncmp(name, "off", 3))
+                               clear_opt(sbi, BG_GC);
+                       else {
+                               kfree(name);
+                               return -EINVAL;
+                       }
+                       kfree(name);
+                       break;
+               case Opt_disable_roll_forward:
+                       set_opt(sbi, DISABLE_ROLL_FORWARD);
+                       break;
+               case Opt_discard:
+                       set_opt(sbi, DISCARD);
+                       break;
+               case Opt_noheap:
+                       set_opt(sbi, NOHEAP);
+                       break;
+#ifdef CONFIG_F2FS_FS_XATTR
+               case Opt_nouser_xattr:
+                       clear_opt(sbi, XATTR_USER);
+                       break;
+#else
+               case Opt_nouser_xattr:
+                       f2fs_msg(sb, KERN_INFO,
+                               "nouser_xattr options not supported");
+                       break;
+#endif
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+               case Opt_noacl:
+                       clear_opt(sbi, POSIX_ACL);
+                       break;
+#else
+               case Opt_noacl:
+                       f2fs_msg(sb, KERN_INFO, "noacl options not supported");
+                       break;
+#endif
+               case Opt_active_logs:
+                       if (args->from && match_int(args, &arg))
+                               return -EINVAL;
+                       if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
+                               return -EINVAL;
+                       sbi->active_logs = arg;
+                       break;
+               case Opt_disable_ext_identify:
+                       set_opt(sbi, DISABLE_EXT_IDENTIFY);
+                       break;
+               default:
+                       f2fs_msg(sb, KERN_ERR,
+                               "Unrecognized mount option \"%s\" or missing value",
+                               p);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
 static struct inode *f2fs_alloc_inode(struct super_block *sb)
 {
        struct f2fs_inode_info *fi;
@@ -112,6 +197,17 @@ static int f2fs_drop_inode(struct inode *inode)
        return generic_drop_inode(inode);
 }
 
+/*
+ * f2fs_dirty_inode() is called from __mark_inode_dirty()
+ *
+ * We should call set_dirty_inode to write the dirty inode through write_inode.
+ */
+static void f2fs_dirty_inode(struct inode *inode, int flags)
+{
+       set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
+       return;
+}
+
 static void f2fs_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
@@ -170,7 +266,7 @@ static int f2fs_freeze(struct super_block *sb)
 {
        int err;
 
-       if (sb->s_flags & MS_RDONLY)
+       if (f2fs_readonly(sb))
                return 0;
 
        err = f2fs_sync_fs(sb, 1);
@@ -214,10 +310,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
 
-       if (test_opt(sbi, BG_GC))
-               seq_puts(seq, ",background_gc_on");
+       if (!(root->d_sb->s_flags & MS_RDONLY) && test_opt(sbi, BG_GC))
+               seq_printf(seq, ",background_gc=%s", "on");
        else
-               seq_puts(seq, ",background_gc_off");
+               seq_printf(seq, ",background_gc=%s", "off");
        if (test_opt(sbi, DISABLE_ROLL_FORWARD))
                seq_puts(seq, ",disable_roll_forward");
        if (test_opt(sbi, DISCARD))
@@ -244,11 +340,64 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
        return 0;
 }
 
+static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       struct f2fs_mount_info org_mount_opt;
+       int err, active_logs;
+
+       /*
+        * Save the old mount options in case we
+        * need to restore them.
+        */
+       org_mount_opt = sbi->mount_opt;
+       active_logs = sbi->active_logs;
+
+       /* parse mount options */
+       err = parse_options(sb, data);
+       if (err)
+               goto restore_opts;
+
+       /*
+        * Previous and new state of filesystem is RO,
+        * so no point in checking GC conditions.
+        */
+       if ((sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
+               goto skip;
+
+       /*
+        * We stop the GC thread if FS is mounted as RO
+        * or if background_gc = off is passed in mount
+        * option. Also sync the filesystem.
+        */
+       if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
+               if (sbi->gc_thread) {
+                       stop_gc_thread(sbi);
+                       f2fs_sync_fs(sb, 1);
+               }
+       } else if (test_opt(sbi, BG_GC) && !sbi->gc_thread) {
+               err = start_gc_thread(sbi);
+               if (err)
+                       goto restore_opts;
+       }
+skip:
+       /* Update the POSIXACL Flag */
+        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+               (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+       return 0;
+
+restore_opts:
+       sbi->mount_opt = org_mount_opt;
+       sbi->active_logs = active_logs;
+       return err;
+}
+
 static struct super_operations f2fs_sops = {
        .alloc_inode    = f2fs_alloc_inode,
        .drop_inode     = f2fs_drop_inode,
        .destroy_inode  = f2fs_destroy_inode,
        .write_inode    = f2fs_write_inode,
+       .dirty_inode    = f2fs_dirty_inode,
        .show_options   = f2fs_show_options,
        .evict_inode    = f2fs_evict_inode,
        .put_super      = f2fs_put_super,
@@ -256,6 +405,7 @@ static struct super_operations f2fs_sops = {
        .freeze_fs      = f2fs_freeze,
        .unfreeze_fs    = f2fs_unfreeze,
        .statfs         = f2fs_statfs,
+       .remount_fs     = f2fs_remount,
 };
 
 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
@@ -303,79 +453,6 @@ static const struct export_operations f2fs_export_ops = {
        .get_parent = f2fs_get_parent,
 };
 
-static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi,
-                               char *options)
-{
-       substring_t args[MAX_OPT_ARGS];
-       char *p;
-       int arg = 0;
-
-       if (!options)
-               return 0;
-
-       while ((p = strsep(&options, ",")) != NULL) {
-               int token;
-               if (!*p)
-                       continue;
-               /*
-                * Initialize args struct so we know whether arg was
-                * found; some options take optional arguments.
-                */
-               args[0].to = args[0].from = NULL;
-               token = match_token(p, f2fs_tokens, args);
-
-               switch (token) {
-               case Opt_gc_background_off:
-                       clear_opt(sbi, BG_GC);
-                       break;
-               case Opt_disable_roll_forward:
-                       set_opt(sbi, DISABLE_ROLL_FORWARD);
-                       break;
-               case Opt_discard:
-                       set_opt(sbi, DISCARD);
-                       break;
-               case Opt_noheap:
-                       set_opt(sbi, NOHEAP);
-                       break;
-#ifdef CONFIG_F2FS_FS_XATTR
-               case Opt_nouser_xattr:
-                       clear_opt(sbi, XATTR_USER);
-                       break;
-#else
-               case Opt_nouser_xattr:
-                       f2fs_msg(sb, KERN_INFO,
-                               "nouser_xattr options not supported");
-                       break;
-#endif
-#ifdef CONFIG_F2FS_FS_POSIX_ACL
-               case Opt_noacl:
-                       clear_opt(sbi, POSIX_ACL);
-                       break;
-#else
-               case Opt_noacl:
-                       f2fs_msg(sb, KERN_INFO, "noacl options not supported");
-                       break;
-#endif
-               case Opt_active_logs:
-                       if (args->from && match_int(args, &arg))
-                               return -EINVAL;
-                       if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
-                               return -EINVAL;
-                       sbi->active_logs = arg;
-                       break;
-               case Opt_disable_ext_identify:
-                       set_opt(sbi, DISABLE_EXT_IDENTIFY);
-                       break;
-               default:
-                       f2fs_msg(sb, KERN_ERR,
-                               "Unrecognized mount option \"%s\" or missing value",
-                               p);
-                       return -EINVAL;
-               }
-       }
-       return 0;
-}
-
 static loff_t max_file_size(unsigned bits)
 {
        loff_t result = ADDRS_PER_INODE;
@@ -541,6 +618,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                if (err)
                        goto free_sb_buf;
        }
+       sb->s_fs_info = sbi;
        /* init some FS parameters */
        sbi->active_logs = NR_CURSEG_TYPE;
 
@@ -553,7 +631,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        set_opt(sbi, POSIX_ACL);
 #endif
        /* parse mount options */
-       err = parse_options(sb, sbi, (char *)data);
+       err = parse_options(sb, (char *)data);
        if (err)
                goto free_sb_buf;
 
@@ -565,7 +643,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_xattr = f2fs_xattr_handlers;
        sb->s_export_op = &f2fs_export_ops;
        sb->s_magic = F2FS_SUPER_MAGIC;
-       sb->s_fs_info = sbi;
        sb->s_time_gran = 1;
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
@@ -674,10 +751,16 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                                "Cannot recover all fsync data errno=%ld", err);
        }
 
-       /* After POR, we can run background GC thread */
-       err = start_gc_thread(sbi);
-       if (err)
-               goto fail;
+       /*
+        * If filesystem is not mounted as read-only then
+        * do start the gc_thread.
+        */
+       if (!(sb->s_flags & MS_RDONLY)) {
+               /* After POR, we can run background GC thread.*/
+               err = start_gc_thread(sbi);
+               if (err)
+                       goto fail;
+       }
 
        err = f2fs_build_stats(sbi);
        if (err)
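
With the super.c hunks above, the old `background_gc_off` option becomes `background_gc=%s` taking "on" or "off" (so a mount line such as `-o background_gc=off` is what the new parser expects), and the new f2fs_remount() can stop or start the GC thread accordingly. The kernel code uses match_token()/match_strdup(); the sketch below uses plain strsep()/strcmp() so it compiles standalone, and handles only this one option.

#include <stdio.h>
#include <string.h>

static int parse_bg_gc(char *options, int *bg_gc)
{
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		if (strncmp(p, "background_gc=", 14) == 0) {
			const char *val = p + 14;

			if (strcmp(val, "on") == 0)
				*bg_gc = 1;
			else if (strcmp(val, "off") == 0)
				*bg_gc = 0;
			else
				return -1;   /* -EINVAL in the kernel */
		}
		/* other options (discard, noheap, ...) would go here */
	}
	return 0;
}

int main(void)
{
	char opts[] = "noheap,background_gc=off,active_logs=4";
	int bg_gc = 1;

	if (parse_bg_gc(opts, &bg_gc))
		return 1;
	printf("background_gc=%s\n", bg_gc ? "on" : "off");
	return 0;
}
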
index 0b02dce313565d5f287b96f464efca6cad0ea2cd..3ab07ecd86ca16b5aaaa4e89792e5fe0db22ffca 100644 (file)
@@ -20,6 +20,7 @@
  */
 #include <linux/rwsem.h>
 #include <linux/f2fs_fs.h>
+#include <linux/security.h>
 #include "f2fs.h"
 #include "xattr.h"
 
@@ -43,6 +44,10 @@ static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list,
                prefix = XATTR_TRUSTED_PREFIX;
                prefix_len = XATTR_TRUSTED_PREFIX_LEN;
                break;
+       case F2FS_XATTR_INDEX_SECURITY:
+               prefix = XATTR_SECURITY_PREFIX;
+               prefix_len = XATTR_SECURITY_PREFIX_LEN;
+               break;
        default:
                return -EINVAL;
        }
@@ -50,7 +55,7 @@ static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list,
        total_len = prefix_len + name_len + 1;
        if (list && total_len <= list_size) {
                memcpy(list, prefix, prefix_len);
-               memcpy(list+prefix_len, name, name_len);
+               memcpy(list + prefix_len, name, name_len);
                list[prefix_len + name_len] = '\0';
        }
        return total_len;
@@ -70,13 +75,14 @@ static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name,
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                break;
+       case F2FS_XATTR_INDEX_SECURITY:
+               break;
        default:
                return -EINVAL;
        }
        if (strcmp(name, "") == 0)
                return -EINVAL;
-       return f2fs_getxattr(dentry->d_inode, type, name,
-                       buffer, size);
+       return f2fs_getxattr(dentry->d_inode, type, name, buffer, size);
 }
 
 static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
@@ -93,13 +99,15 @@ static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                break;
+       case F2FS_XATTR_INDEX_SECURITY:
+               break;
        default:
                return -EINVAL;
        }
        if (strcmp(name, "") == 0)
                return -EINVAL;
 
-       return f2fs_setxattr(dentry->d_inode, type, name, value, size);
+       return f2fs_setxattr(dentry->d_inode, type, name, value, size, NULL);
 }
 
 static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list,
@@ -145,6 +153,31 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
        return 0;
 }
 
+#ifdef CONFIG_F2FS_FS_SECURITY
+static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+               void *page)
+{
+       const struct xattr *xattr;
+       int err = 0;
+
+       for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+               err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
+                               xattr->name, xattr->value,
+                               xattr->value_len, (struct page *)page);
+               if (err < 0)
+                       break;
+       }
+       return err;
+}
+
+int f2fs_init_security(struct inode *inode, struct inode *dir,
+                               const struct qstr *qstr, struct page *ipage)
+{
+       return security_inode_init_security(inode, dir, qstr,
+                               &f2fs_initxattrs, ipage);
+}
+#endif
+
 const struct xattr_handler f2fs_xattr_user_handler = {
        .prefix = XATTR_USER_PREFIX,
        .flags  = F2FS_XATTR_INDEX_USER,
@@ -169,6 +202,14 @@ const struct xattr_handler f2fs_xattr_advise_handler = {
        .set    = f2fs_xattr_advise_set,
 };
 
+const struct xattr_handler f2fs_xattr_security_handler = {
+       .prefix = XATTR_SECURITY_PREFIX,
+       .flags  = F2FS_XATTR_INDEX_SECURITY,
+       .list   = f2fs_xattr_generic_list,
+       .get    = f2fs_xattr_generic_get,
+       .set    = f2fs_xattr_generic_set,
+};
+
 static const struct xattr_handler *f2fs_xattr_handler_map[] = {
        [F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -176,6 +217,9 @@ static const struct xattr_handler *f2fs_xattr_handler_map[] = {
        [F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &f2fs_xattr_acl_default_handler,
 #endif
        [F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler,
+#ifdef CONFIG_F2FS_FS_SECURITY
+       [F2FS_XATTR_INDEX_SECURITY] = &f2fs_xattr_security_handler,
+#endif
        [F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
 };
 
@@ -186,6 +230,9 @@ const struct xattr_handler *f2fs_xattr_handlers[] = {
        &f2fs_xattr_acl_default_handler,
 #endif
        &f2fs_xattr_trusted_handler,
+#ifdef CONFIG_F2FS_FS_SECURITY
+       &f2fs_xattr_security_handler,
+#endif
        &f2fs_xattr_advise_handler,
        NULL,
 };
@@ -218,6 +265,8 @@ int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
                return -ENODATA;
 
        page = get_node_page(sbi, fi->i_xattr_nid);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
        base_addr = page_address(page);
 
        list_for_each_xattr(entry, base_addr) {
@@ -268,6 +317,8 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
                return 0;
 
        page = get_node_page(sbi, fi->i_xattr_nid);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
        base_addr = page_address(page);
 
        list_for_each_xattr(entry, base_addr) {
@@ -296,7 +347,7 @@ cleanup:
 }
 
 int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
-                                       const void *value, size_t value_len)
+                       const void *value, size_t value_len, struct page *ipage)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -335,7 +386,7 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
                set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
                mark_inode_dirty(inode);
 
-               page = new_node_page(&dn, XATTR_NODE_OFFSET);
+               page = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
                if (IS_ERR(page)) {
                        alloc_nid_failed(sbi, fi->i_xattr_nid);
                        fi->i_xattr_nid = 0;
@@ -435,7 +486,10 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
                inode->i_ctime = CURRENT_TIME;
                clear_inode_flag(fi, FI_ACL_MODE);
        }
-       update_inode_page(inode);
+       if (ipage)
+               update_inode(inode, ipage);
+       else
+               update_inode_page(inode);
        mutex_unlock_op(sbi, ilock);
 
        return 0;
index 49c9558305e3b4dae06e16733f150af76dbfe954..3c0817bef25d5fdbab3279cb446a5e2c20ba5f38 100644 (file)
@@ -112,21 +112,19 @@ extern const struct xattr_handler f2fs_xattr_trusted_handler;
 extern const struct xattr_handler f2fs_xattr_acl_access_handler;
 extern const struct xattr_handler f2fs_xattr_acl_default_handler;
 extern const struct xattr_handler f2fs_xattr_advise_handler;
+extern const struct xattr_handler f2fs_xattr_security_handler;
 
 extern const struct xattr_handler *f2fs_xattr_handlers[];
 
-extern int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
-               const void *value, size_t value_len);
-extern int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
-               void *buffer, size_t buffer_size);
-extern ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
-               size_t buffer_size);
-
+extern int f2fs_setxattr(struct inode *, int, const char *,
+                               const void *, size_t, struct page *);
+extern int f2fs_getxattr(struct inode *, int, const char *, void *, size_t);
+extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
 #else
 
 #define f2fs_xattr_handlers    NULL
 static inline int f2fs_setxattr(struct inode *inode, int name_index,
-       const char *name, const void *value, size_t value_len)
+               const char *name, const void *value, size_t value_len)
 {
        return -EOPNOTSUPP;
 }
@@ -142,4 +140,14 @@ static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
 }
 #endif
 
+#ifdef CONFIG_F2FS_FS_SECURITY
+extern int f2fs_init_security(struct inode *, struct inode *,
+                               const struct qstr *, struct page *);
+#else
+static inline int f2fs_init_security(struct inode *inode, struct inode *dir,
+                               const struct qstr *qstr, struct page *ipage)
+{
+       return 0;
+}
+#endif
 #endif /* __F2FS_XATTR_H__ */
index 7a6f02caf286d4fa20990b259dd5e4773eeff96e..3963ede84eb021887531ea94eb41ea4f203fdc0e 100644 (file)
@@ -543,6 +543,7 @@ end_of_dir:
 EXPORT_SYMBOL_GPL(fat_search_long);
 
 struct fat_ioctl_filldir_callback {
+       struct dir_context ctx;
        void __user *dirent;
        int result;
        /* for dir ioctl */
@@ -552,8 +553,9 @@ struct fat_ioctl_filldir_callback {
        int short_len;
 };
 
-static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent,
-                        filldir_t filldir, int short_only, int both)
+static int __fat_readdir(struct inode *inode, struct file *file,
+                        struct dir_context *ctx, int short_only,
+                        struct fat_ioctl_filldir_callback *both)
 {
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
@@ -564,27 +566,20 @@ static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent,
        unsigned char bufname[FAT_MAX_SHORT_SIZE];
        int isvfat = sbi->options.isvfat;
        const char *fill_name = NULL;
-       unsigned long inum;
-       unsigned long lpos, dummy, *furrfu = &lpos;
+       int fake_offset = 0;
        loff_t cpos;
        int short_len = 0, fill_len = 0;
        int ret = 0;
 
        mutex_lock(&sbi->s_lock);
 
-       cpos = filp->f_pos;
+       cpos = ctx->pos;
        /* Fake . and .. for the root directory. */
        if (inode->i_ino == MSDOS_ROOT_INO) {
-               while (cpos < 2) {
-                       if (filldir(dirent, "..", cpos+1, cpos,
-                                   MSDOS_ROOT_INO, DT_DIR) < 0)
-                               goto out;
-                       cpos++;
-                       filp->f_pos++;
-               }
-               if (cpos == 2) {
-                       dummy = 2;
-                       furrfu = &dummy;
+               if (!dir_emit_dots(file, ctx))
+                       goto out;
+               if (ctx->pos == 2) {
+                       fake_offset = 1;
                        cpos = 0;
                }
        }
@@ -619,7 +614,7 @@ parse_record:
                int status = fat_parse_long(inode, &cpos, &bh, &de,
                                            &unicode, &nr_slots);
                if (status < 0) {
-                       filp->f_pos = cpos;
+                       ctx->pos = cpos;
                        ret = status;
                        goto out;
                } else if (status == PARSE_INVALID)
@@ -639,6 +634,19 @@ parse_record:
                        /* !both && !short_only, so we don't need shortname. */
                        if (!both)
                                goto start_filldir;
+
+                       short_len = fat_parse_short(sb, de, bufname,
+                                                   sbi->options.dotsOK);
+                       if (short_len == 0)
+                               goto record_end;
+                       /* hack for fat_ioctl_filldir() */
+                       both->longname = fill_name;
+                       both->long_len = fill_len;
+                       both->shortname = bufname;
+                       both->short_len = short_len;
+                       fill_name = NULL;
+                       fill_len = 0;
+                       goto start_filldir;
                }
        }
 
@@ -646,28 +654,21 @@ parse_record:
        if (short_len == 0)
                goto record_end;
 
-       if (nr_slots) {
-               /* hack for fat_ioctl_filldir() */
-               struct fat_ioctl_filldir_callback *p = dirent;
-
-               p->longname = fill_name;
-               p->long_len = fill_len;
-               p->shortname = bufname;
-               p->short_len = short_len;
-               fill_name = NULL;
-               fill_len = 0;
-       } else {
-               fill_name = bufname;
-               fill_len = short_len;
-       }
+       fill_name = bufname;
+       fill_len = short_len;
 
 start_filldir:
-       lpos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
-       if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME))
-               inum = inode->i_ino;
-       else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) {
-               inum = parent_ino(filp->f_path.dentry);
+       if (!fake_offset)
+               ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
+
+       if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
+               if (!dir_emit_dot(file, ctx))
+                       goto fill_failed;
+       } else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) {
+               if (!dir_emit_dotdot(file, ctx))
+                       goto fill_failed;
        } else {
+               unsigned long inum;
                loff_t i_pos = fat_make_i_pos(sb, bh, de);
                struct inode *tmp = fat_iget(sb, i_pos);
                if (tmp) {
@@ -675,18 +676,17 @@ start_filldir:
                        iput(tmp);
                } else
                        inum = iunique(sb, MSDOS_ROOT_INO);
+               if (!dir_emit(ctx, fill_name, fill_len, inum,
+                           (de->attr & ATTR_DIR) ? DT_DIR : DT_REG))
+                       goto fill_failed;
        }
 
-       if (filldir(dirent, fill_name, fill_len, *furrfu, inum,
-                   (de->attr & ATTR_DIR) ? DT_DIR : DT_REG) < 0)
-               goto fill_failed;
-
 record_end:
-       furrfu = &lpos;
-       filp->f_pos = cpos;
+       fake_offset = 0;
+       ctx->pos = cpos;
        goto get_new;
 end_of_dir:
-       filp->f_pos = cpos;
+       ctx->pos = cpos;
 fill_failed:
        brelse(bh);
        if (unicode)
@@ -696,10 +696,9 @@ out:
        return ret;
 }
 
-static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int fat_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
-       return __fat_readdir(inode, filp, dirent, filldir, 0, 0);
+       return __fat_readdir(file_inode(file), file, ctx, 0, NULL);
 }
 
 #define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type)                         \
@@ -755,20 +754,25 @@ efault:                                                                      \
 
 FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, __fat_dirent)
 
-static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
+static int fat_ioctl_readdir(struct inode *inode, struct file *file,
                             void __user *dirent, filldir_t filldir,
                             int short_only, int both)
 {
-       struct fat_ioctl_filldir_callback buf;
+       struct fat_ioctl_filldir_callback buf = {
+               .ctx.actor = filldir,
+               .dirent = dirent
+       };
        int ret;
 
        buf.dirent = dirent;
        buf.result = 0;
        mutex_lock(&inode->i_mutex);
+       buf.ctx.pos = file->f_pos;
        ret = -ENOENT;
        if (!IS_DEADDIR(inode)) {
-               ret = __fat_readdir(inode, filp, &buf, filldir,
-                                   short_only, both);
+               ret = __fat_readdir(inode, file, &buf.ctx,
+                                   short_only, both ? &buf : NULL);
+               file->f_pos = buf.ctx.pos;
        }
        mutex_unlock(&inode->i_mutex);
        if (ret >= 0)
@@ -854,7 +858,7 @@ static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
 const struct file_operations fat_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = fat_readdir,
+       .iterate        = fat_readdir,
        .unlocked_ioctl = fat_dir_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = fat_compat_dir_ioctl,
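
For reference, the conversion above replaces the old ->readdir(file, dirent, filldir) callback style with ->iterate(file, ctx): the filesystem reports entries through dir_emit()/dir_emit_dots() and keeps ctx->pos current so the VFS can restart it later. Below is a minimal, hypothetical ->iterate() for an imaginary filesystem with a fixed entry table; the "demo_*" names and entries are invented for illustration and are not code from this merge.

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    static const struct {
            const char *name;
            u64 ino;
            unsigned type;
    } demo_entries[] = {
            { "hello",  100, DT_REG },
            { "subdir", 101, DT_DIR },
    };

    static int demo_iterate(struct file *file, struct dir_context *ctx)
    {
            size_t i;

            /* Emits "." and ".." for positions 0 and 1 and advances ctx->pos to 2. */
            if (!dir_emit_dots(file, ctx))
                    return 0;

            for (i = ctx->pos - 2; i < ARRAY_SIZE(demo_entries); i++) {
                    if (!dir_emit(ctx, demo_entries[i].name,
                                  strlen(demo_entries[i].name),
                                  demo_entries[i].ino, demo_entries[i].type))
                            return 0;       /* buffer full; the VFS will call again */
                    ctx->pos++;             /* resume point for the next call */
            }
            return 0;
    }

    static const struct file_operations demo_dir_fops = {
            .llseek  = generic_file_llseek,
            .read    = generic_read_dir,
            .iterate = demo_iterate,
    };
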
index 664b07a53870b64f6fa579c6fd3b2002b65fb89c..25d4099a4aead2691774d50e23a9cef59c4fb174 100644 (file)
@@ -49,7 +49,7 @@
 
 
 static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int);
-static int             vxfs_readdir(struct file *, void *, filldir_t);
+static int             vxfs_readdir(struct file *, struct dir_context *);
 
 const struct inode_operations vxfs_dir_inode_ops = {
        .lookup =               vxfs_lookup,
@@ -58,7 +58,7 @@ const struct inode_operations vxfs_dir_inode_ops = {
 const struct file_operations vxfs_dir_operations = {
        .llseek =               generic_file_llseek,
        .read =                 generic_read_dir,
-       .readdir =              vxfs_readdir,
+       .iterate =              vxfs_readdir,
 };
 
  
@@ -235,7 +235,7 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags)
  *   Zero.
  */
 static int
-vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
+vxfs_readdir(struct file *fp, struct dir_context *ctx)
 {
        struct inode            *ip = file_inode(fp);
        struct super_block      *sbp = ip->i_sb;
@@ -243,20 +243,17 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
        u_long                  page, npages, block, pblocks, nblocks, offset;
        loff_t                  pos;
 
-       switch ((long)fp->f_pos) {
-       case 0:
-               if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0)
-                       goto out;
-               fp->f_pos++;
-               /* fallthrough */
-       case 1:
-               if (filler(retp, "..", 2, fp->f_pos, VXFS_INO(ip)->vii_dotdot, DT_DIR) < 0)
-                       goto out;
-               fp->f_pos++;
-               /* fallthrough */
+       if (ctx->pos == 0) {
+               if (!dir_emit_dot(fp, ctx))
+                       return 0;
+               ctx->pos = 1;
        }
-
-       pos = fp->f_pos - 2;
+       if (ctx->pos == 1) {
+               if (!dir_emit(ctx, "..", 2, VXFS_INO(ip)->vii_dotdot, DT_DIR))
+                       return 0;
+               ctx->pos = 2;
+       }
+       pos = ctx->pos - 2;
        
        if (pos > VXFS_DIRROUND(ip->i_size))
                return 0;
@@ -270,16 +267,16 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
        block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
 
        for (; page < npages; page++, block = 0) {
-               caddr_t                 kaddr;
+               char                    *kaddr;
                struct page             *pp;
 
                pp = vxfs_get_page(ip->i_mapping, page);
                if (IS_ERR(pp))
                        continue;
-               kaddr = (caddr_t)page_address(pp);
+               kaddr = (char *)page_address(pp);
 
                for (; block <= nblocks && block <= pblocks; block++) {
-                       caddr_t                 baddr, limit;
+                       char                    *baddr, *limit;
                        struct vxfs_dirblk      *dbp;
                        struct vxfs_direct      *de;
 
@@ -292,21 +289,18 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
                                 (kaddr + offset) :
                                 (baddr + VXFS_DIRBLKOV(dbp)));
 
-                       for (; (caddr_t)de <= limit; de = vxfs_next_entry(de)) {
-                               int     over;
-
+                       for (; (char *)de <= limit; de = vxfs_next_entry(de)) {
                                if (!de->d_reclen)
                                        break;
                                if (!de->d_ino)
                                        continue;
 
-                               offset = (caddr_t)de - kaddr;
-                               over = filler(retp, de->d_name, de->d_namelen,
-                                       ((page << PAGE_CACHE_SHIFT) | offset) + 2,
-                                       de->d_ino, DT_UNKNOWN);
-                               if (over) {
+                               offset = (char *)de - kaddr;
+                               ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+                               if (!dir_emit(ctx, de->d_name, de->d_namelen,
+                                       de->d_ino, DT_UNKNOWN)) {
                                        vxfs_put_page(pp);
-                                       goto done;
+                                       return 0;
                                }
                        }
                        offset = 0;
@@ -314,9 +308,6 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
                vxfs_put_page(pp);
                offset = 0;
        }
-
-done:
-       fp->f_pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
-out:
+       ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
        return 0;
 }
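
The vxfs conversion keeps its long-standing directory-position encoding -- a page index in the high bits, a byte offset within that page in the low bits, shifted up by 2 so positions 0 and 1 stay reserved for the dot entries -- and now stores it in ctx->pos. As a hedged illustration of that round trip (the helper names are invented; PAGE_CACHE_* is the pre-4.x page-cache naming used here):

    #include <linux/pagemap.h>      /* PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE */

    /* Pack a (page, offset-in-page) pair into a directory position. */
    static inline loff_t demo_dirpos_encode(unsigned long page, unsigned long offset)
    {
            return (((loff_t)page << PAGE_CACHE_SHIFT) | offset) + 2;
    }

    /* Recover the page index and in-page offset when ->iterate() is re-entered. */
    static inline void demo_dirpos_decode(loff_t pos, unsigned long *page,
                                          unsigned long *offset)
    {
            pos -= 2;
            *page   = pos >> PAGE_CACHE_SHIFT;
            *offset = pos & (PAGE_CACHE_SIZE - 1);
    }
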
index 3be57189efd5b3a8005321f02e40971af9429cf6..a85ac4e33436bc64f59b882e714d440a0cc90dac 100644 (file)
@@ -45,6 +45,7 @@ struct wb_writeback_work {
        unsigned int for_kupdate:1;
        unsigned int range_cyclic:1;
        unsigned int for_background:1;
+       unsigned int for_sync:1;        /* sync(2) WB_SYNC_ALL writeback */
        enum wb_reason reason;          /* why was writeback initiated? */
 
        struct list_head list;          /* pending work list */
@@ -443,9 +444,11 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
        /*
         * Make sure to wait on the data before writing out the metadata.
         * This is important for filesystems that modify metadata on data
-        * I/O completion.
+        * I/O completion. We don't do it for sync(2) writeback because it has a
+        * separate, external IO completion path and ->sync_fs for guaranteeing
+        * inode metadata is written back correctly.
         */
-       if (wbc->sync_mode == WB_SYNC_ALL) {
+       if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
@@ -578,6 +581,7 @@ static long writeback_sb_inodes(struct super_block *sb,
                .tagged_writepages      = work->tagged_writepages,
                .for_kupdate            = work->for_kupdate,
                .for_background         = work->for_background,
+               .for_sync               = work->for_sync,
                .range_cyclic           = work->range_cyclic,
                .range_start            = 0,
                .range_end              = LLONG_MAX,
@@ -1362,6 +1366,7 @@ void sync_inodes_sb(struct super_block *sb)
                .range_cyclic   = 0,
                .done           = &done,
                .reason         = WB_REASON_SYNC,
+               .for_sync       = 1,
        };
 
        /* Nothing to do? */
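
The new for_sync bit exists because sync(2) already has its own data wait: sync_inodes_sb() queues a WB_SYNC_ALL work item, waits for the flusher to finish it, and then waits on all of the superblock's data I/O in a single pass, with ->sync_fs() covering metadata afterwards. A rough sketch of that flow follows; it is simplified and approximate rather than the verbatim fs/fs-writeback.c body, and bdi_queue_work() and wait_sb_inodes() are that file's internal helpers.

    /* Approximate shape of sync_inodes_sb() with the for_sync flag in place;
     * bookkeeping and the "nothing to do" checks are omitted.
     */
    void sync_inodes_sb_sketch(struct super_block *sb)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            struct wb_writeback_work work = {
                    .sb             = sb,
                    .sync_mode      = WB_SYNC_ALL,
                    .nr_pages       = LONG_MAX,
                    .range_cyclic   = 0,
                    .done           = &done,
                    .reason         = WB_REASON_SYNC,
                    .for_sync       = 1,    /* flusher skips per-inode data waits */
            };

            bdi_queue_work(sb->s_bdi, &work);
            wait_for_completion(&done);     /* pages written, not yet waited on */
            wait_sb_inodes(sb);             /* one batched wait on all data I/O */
    }
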
index b52aed1dca97fc5769d43a34f8e51de2e28fd653..f7cff367db7f2df8c358b5a723889b4bd8790f54 100644 (file)
@@ -115,7 +115,7 @@ struct fscache_cache *fscache_select_cache_for_object(
                                     struct fscache_object, cookie_link);
 
                cache = object->cache;
-               if (object->state >= FSCACHE_OBJECT_DYING ||
+               if (fscache_object_is_dying(object) ||
                    test_bit(FSCACHE_IOERROR, &cache->flags))
                        cache = NULL;
 
@@ -224,8 +224,10 @@ int fscache_add_cache(struct fscache_cache *cache,
        BUG_ON(!ifsdef);
 
        cache->flags = 0;
-       ifsdef->event_mask = ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED);
-       ifsdef->state = FSCACHE_OBJECT_ACTIVE;
+       ifsdef->event_mask =
+               ((1 << NR_FSCACHE_OBJECT_EVENTS) - 1) &
+               ~(1 << FSCACHE_OBJECT_EV_CLEARED);
+       __set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &ifsdef->flags);
 
        if (!tagname)
                tagname = cache->identifier;
@@ -330,25 +332,25 @@ static void fscache_withdraw_all_objects(struct fscache_cache *cache,
 {
        struct fscache_object *object;
 
-       spin_lock(&cache->object_list_lock);
-
        while (!list_empty(&cache->object_list)) {
-               object = list_entry(cache->object_list.next,
-                                   struct fscache_object, cache_link);
-               list_move_tail(&object->cache_link, dying_objects);
+               spin_lock(&cache->object_list_lock);
 
-               _debug("withdraw %p", object->cookie);
+               if (!list_empty(&cache->object_list)) {
+                       object = list_entry(cache->object_list.next,
+                                           struct fscache_object, cache_link);
+                       list_move_tail(&object->cache_link, dying_objects);
 
-               spin_lock(&object->lock);
-               spin_unlock(&cache->object_list_lock);
-               fscache_raise_event(object, FSCACHE_OBJECT_EV_WITHDRAW);
-               spin_unlock(&object->lock);
+                       _debug("withdraw %p", object->cookie);
+
+                       /* This must be done under object_list_lock to prevent
+                        * a race with fscache_drop_object().
+                        */
+                       fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
+               }
 
+               spin_unlock(&cache->object_list_lock);
                cond_resched();
-               spin_lock(&cache->object_list_lock);
        }
-
-       spin_unlock(&cache->object_list_lock);
 }
 
 /**
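
The restructured withdrawal loop above is an instance of a general pattern: re-take the list lock on every pass, detach a single entry, do whatever must not race with removal while the lock is still held, then drop the lock and cond_resched() before the next entry. A generic, hypothetical sketch of the same pattern (types and names invented):

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    struct demo_item {
            struct list_head link;
    };

    static void demo_drain_list(struct list_head *list, struct list_head *graveyard,
                                spinlock_t *lock)
    {
            struct demo_item *item;

            while (!list_empty(list)) {
                    spin_lock(lock);
                    if (!list_empty(list)) {        /* re-check under the lock */
                            item = list_first_entry(list, struct demo_item, link);
                            list_move_tail(&item->link, graveyard);
                            /* work that must not race with the entry being freed
                             * goes here, while the lock is still held */
                    }
                    spin_unlock(lock);
                    cond_resched();                 /* be fair on long lists */
            }
    }
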
index e2cba1f60c21217e5c28e7e1a3d096a75fcb3be8..0e91a3c9fdb2018abfcd2588d859876c78b545fd 100644 (file)
@@ -95,6 +95,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
        atomic_set(&cookie->usage, 1);
        atomic_set(&cookie->n_children, 0);
 
+       /* We keep the active count elevated until relinquishment to prevent an
+        * attempt to wake up every time the object operations queue quiesces.
+        */
+       atomic_set(&cookie->n_active, 1);
+
        atomic_inc(&parent->usage);
        atomic_inc(&parent->n_children);
 
@@ -177,7 +182,6 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
 
        cookie->flags =
                (1 << FSCACHE_COOKIE_LOOKING_UP) |
-               (1 << FSCACHE_COOKIE_CREATING) |
                (1 << FSCACHE_COOKIE_NO_DATA_YET);
 
        /* ask the cache to allocate objects for this cookie and its parent
@@ -205,7 +209,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
 
        /* initiate the process of looking up all the objects in the chain
         * (done by fscache_initialise_object()) */
-       fscache_enqueue_object(object);
+       fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);
 
        spin_unlock(&cookie->lock);
 
@@ -285,7 +289,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
 
 object_already_extant:
        ret = -ENOBUFS;
-       if (object->state >= FSCACHE_OBJECT_DYING) {
+       if (fscache_object_is_dead(object)) {
                spin_unlock(&cookie->lock);
                goto error;
        }
@@ -321,7 +325,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
        ret = -EEXIST;
        hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
                if (p->cache == object->cache) {
-                       if (p->state >= FSCACHE_OBJECT_DYING)
+                       if (fscache_object_is_dying(p))
                                ret = -ENOBUFS;
                        goto cant_attach_object;
                }
@@ -332,7 +336,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
        hlist_for_each_entry(p, &cookie->parent->backing_objects,
                             cookie_link) {
                if (p->cache == object->cache) {
-                       if (p->state >= FSCACHE_OBJECT_DYING) {
+                       if (fscache_object_is_dying(p)) {
                                ret = -ENOBUFS;
                                spin_unlock(&cookie->parent->lock);
                                goto cant_attach_object;
@@ -400,7 +404,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
                        object = hlist_entry(cookie->backing_objects.first,
                                             struct fscache_object,
                                             cookie_link);
-                       if (object->state < FSCACHE_OBJECT_DYING)
+                       if (fscache_object_is_live(object))
                                fscache_raise_event(
                                        object, FSCACHE_OBJECT_EV_INVALIDATE);
                }
@@ -467,9 +471,7 @@ EXPORT_SYMBOL(__fscache_update_cookie);
  */
 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
 {
-       struct fscache_cache *cache;
        struct fscache_object *object;
-       unsigned long event;
 
        fscache_stat(&fscache_n_relinquishes);
        if (retire)
@@ -481,8 +483,11 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
                return;
        }
 
-       _enter("%p{%s,%p},%d",
-              cookie, cookie->def->name, cookie->netfs_data, retire);
+       _enter("%p{%s,%p,%d},%d",
+              cookie, cookie->def->name, cookie->netfs_data,
+              atomic_read(&cookie->n_active), retire);
+
+       ASSERTCMP(atomic_read(&cookie->n_active), >, 0);
 
        if (atomic_read(&cookie->n_children) != 0) {
                printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
@@ -490,62 +495,28 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
                BUG();
        }
 
-       /* wait for the cookie to finish being instantiated (or to fail) */
-       if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
-               fscache_stat(&fscache_n_relinquishes_waitcrt);
-               wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
-                           fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-       }
-
-       event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
+       /* No further netfs-accessing operations on this cookie permitted */
+       set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
+       if (retire)
+               set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
 
-try_again:
        spin_lock(&cookie->lock);
-
-       /* break links with all the active objects */
-       while (!hlist_empty(&cookie->backing_objects)) {
-               int n_reads;
-               object = hlist_entry(cookie->backing_objects.first,
-                                    struct fscache_object,
-                                    cookie_link);
-
-               _debug("RELEASE OBJ%x", object->debug_id);
-
-               set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags);
-               n_reads = atomic_read(&object->n_reads);
-               if (n_reads) {
-                       int n_ops = object->n_ops;
-                       int n_in_progress = object->n_in_progress;
-                       spin_unlock(&cookie->lock);
-                       printk(KERN_ERR "FS-Cache:"
-                              " Cookie '%s' still has %d outstanding reads (%d,%d)\n",
-                              cookie->def->name,
-                              n_reads, n_ops, n_in_progress);
-                       wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS,
-                                   fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-                       printk("Wait finished\n");
-                       goto try_again;
-               }
-
-               /* detach each cache object from the object cookie */
-               spin_lock(&object->lock);
-               hlist_del_init(&object->cookie_link);
-
-               cache = object->cache;
-               object->cookie = NULL;
-               fscache_raise_event(object, event);
-               spin_unlock(&object->lock);
-
-               if (atomic_dec_and_test(&cookie->usage))
-                       /* the cookie refcount shouldn't be reduced to 0 yet */
-                       BUG();
+       hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
+               fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
        }
+       spin_unlock(&cookie->lock);
 
-       /* detach pointers back to the netfs */
+       /* Wait for cessation of activity requiring access to the netfs (when
+        * n_active reaches 0).
+        */
+       if (!atomic_dec_and_test(&cookie->n_active))
+               wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
+                                TASK_UNINTERRUPTIBLE);
+
+       /* Clear pointers back to the netfs */
        cookie->netfs_data      = NULL;
        cookie->def             = NULL;
-
-       spin_unlock(&cookie->lock);
+       BUG_ON(cookie->stores.rnode);
 
        if (cookie->parent) {
                ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
@@ -553,7 +524,7 @@ try_again:
                atomic_dec(&cookie->parent->n_children);
        }
 
-       /* finally dispose of the cookie */
+       /* Dispose of the netfs's link to the cookie */
        ASSERTCMP(atomic_read(&cookie->usage), >, 0);
        fscache_cookie_put(cookie);
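
The counter being waited on here is paired with use/unuse helpers elsewhere in this series (they appear as fscache_use_cookie()/fscache_unuse_cookie() in the object-list hunk further down): anything that needs the netfs data takes a reference on n_active, and the final release wakes the relinquisher. Roughly, as a sketch of the internal.h helpers rather than a verbatim copy:

    /* Sketch of the n_active pairing.  The increment fails once
     * __fscache_relinquish_cookie() has dropped the initial reference that
     * __fscache_acquire_cookie() set up.
     */
    static inline bool demo_use_cookie(struct fscache_cookie *cookie)
    {
            return atomic_inc_not_zero(&cookie->n_active) != 0;
    }

    /* The last unuse is what satisfies the wait_on_atomic_t() above. */
    static inline void demo_unuse_cookie(struct fscache_cookie *cookie)
    {
            if (atomic_dec_and_test(&cookie->n_active))
                    wake_up_atomic_t(&cookie->n_active);
    }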
 
index f5b4baee735208d5124f31d51226485db2b2d4e1..10a2ade0bdf8c1ff390be0e82c5dd2e161732054 100644 (file)
@@ -55,6 +55,7 @@ static struct fscache_cookie_def fscache_fsdef_index_def = {
 
 struct fscache_cookie fscache_fsdef_index = {
        .usage          = ATOMIC_INIT(1),
+       .n_active       = ATOMIC_INIT(1),
        .lock           = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
        .backing_objects = HLIST_HEAD_INIT,
        .def            = &fscache_fsdef_index_def,
index ee38fef4be5173dc43abf6648b5a7cf00037a7ca..12d505bedb5c2ce6a808948b5ee082bca9e127cc 100644 (file)
@@ -93,14 +93,11 @@ static inline bool fscache_object_congested(void)
 
 extern int fscache_wait_bit(void *);
 extern int fscache_wait_bit_interruptible(void *);
+extern int fscache_wait_atomic_t(atomic_t *);
 
 /*
  * object.c
  */
-extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5];
-
-extern void fscache_withdrawing_object(struct fscache_cache *,
-                                      struct fscache_object *);
 extern void fscache_enqueue_object(struct fscache_object *);
 
 /*
@@ -110,8 +107,10 @@ extern void fscache_enqueue_object(struct fscache_object *);
 extern const struct file_operations fscache_objlist_fops;
 
 extern void fscache_objlist_add(struct fscache_object *);
+extern void fscache_objlist_remove(struct fscache_object *);
 #else
 #define fscache_objlist_add(object) do {} while(0)
+#define fscache_objlist_remove(object) do {} while(0)
 #endif
 
 /*
@@ -291,6 +290,10 @@ static inline void fscache_raise_event(struct fscache_object *object,
                                       unsigned event)
 {
        BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS);
+#if 0
+       printk("*** fscache_raise_event(OBJ%d{%lx},%x)\n",
+              object->debug_id, object->event_mask, (1 << event));
+#endif
        if (!test_and_set_bit(event, &object->events) &&
            test_bit(event, &object->event_mask))
                fscache_enqueue_object(object);
index f9d856773f7993ab7eb99ec0af67daea119972e1..7c27907e650c216cade20928cd8fb4f0f1ac4363 100644 (file)
@@ -205,7 +205,6 @@ int fscache_wait_bit(void *flags)
        schedule();
        return 0;
 }
-EXPORT_SYMBOL(fscache_wait_bit);
 
 /*
  * wait_on_bit() sleep function for interruptible waiting
@@ -215,4 +214,12 @@ int fscache_wait_bit_interruptible(void *flags)
        schedule();
        return signal_pending(current);
 }
-EXPORT_SYMBOL(fscache_wait_bit_interruptible);
+
+/*
+ * wait_on_atomic_t() sleep function for uninterruptible waiting
+ */
+int fscache_wait_atomic_t(atomic_t *p)
+{
+       schedule();
+       return 0;
+}
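
The action function passed to wait_on_atomic_t() is only responsible for how to sleep: it is called after the waiter is queued and the counter has been observed non-zero, and a non-zero return value aborts the wait and is propagated to the caller. A hypothetical interruptible variant (not part of this patch) would look like:

    /* Hypothetical interruptible counterpart to fscache_wait_atomic_t() above. */
    static int demo_wait_atomic_t_interruptible(atomic_t *p)
    {
            schedule();
            return signal_pending(current) ? -ERESTARTSYS : 0;
    }
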
index e028b8eb1c409bf730579f3401082fcb8f31c67e..b1bb6117473a25a292ccf326dcd607fcf4a4f354 100644 (file)
@@ -40,6 +40,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
        /* initialise the primary index cookie */
        atomic_set(&netfs->primary_index->usage, 1);
        atomic_set(&netfs->primary_index->n_children, 0);
+       atomic_set(&netfs->primary_index->n_active, 1);
 
        netfs->primary_index->def               = &fscache_fsdef_netfs_def;
        netfs->primary_index->parent            = &fscache_fsdef_index;
index f27c89d178855bbe2adfd0aba12a816eae7ab409..e1959efad64fa0f4101541683e8c422d27a37aa7 100644 (file)
@@ -70,13 +70,10 @@ void fscache_objlist_add(struct fscache_object *obj)
        write_unlock(&fscache_object_list_lock);
 }
 
-/**
- * fscache_object_destroy - Note that a cache object is about to be destroyed
- * @object: The object to be destroyed
- *
- * Note the imminent destruction and deallocation of a cache object record.
+/*
+ * Remove an object from the object list.
  */
-void fscache_object_destroy(struct fscache_object *obj)
+void fscache_objlist_remove(struct fscache_object *obj)
 {
        write_lock(&fscache_object_list_lock);
 
@@ -85,7 +82,6 @@ void fscache_object_destroy(struct fscache_object *obj)
 
        write_unlock(&fscache_object_list_lock);
 }
-EXPORT_SYMBOL(fscache_object_destroy);
 
 /*
  * find the object in the tree on or after the specified index
@@ -166,15 +162,14 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
 {
        struct fscache_objlist_data *data = m->private;
        struct fscache_object *obj = v;
+       struct fscache_cookie *cookie;
        unsigned long config = data->config;
-       uint16_t keylen, auxlen;
        char _type[3], *type;
-       bool no_cookie;
        u8 *buf = data->buf, *p;
 
        if ((unsigned long) v == 1) {
                seq_puts(m, "OBJECT   PARENT   STAT CHLDN OPS OOP IPR EX READS"
-                        " EM EV F S"
+                        " EM EV FL S"
                         " | NETFS_COOKIE_DEF TY FL NETFS_DATA");
                if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
                              FSCACHE_OBJLIST_CONFIG_AUX))
@@ -193,7 +188,7 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
 
        if ((unsigned long) v == 2) {
                seq_puts(m, "======== ======== ==== ===== === === === == ====="
-                        " == == = ="
+                        " == == == ="
                         " | ================ == == ================");
                if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
                              FSCACHE_OBJLIST_CONFIG_AUX))
@@ -216,10 +211,11 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
                }                                                       \
        } while(0)
 
+       cookie = obj->cookie;
        if (~config) {
-               FILTER(obj->cookie,
+               FILTER(cookie->def,
                       COOKIE, NOCOOKIE);
-               FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
+               FILTER(fscache_object_is_active(obj) ||
                       obj->n_ops != 0 ||
                       obj->n_obj_ops != 0 ||
                       obj->flags ||
@@ -235,10 +231,10 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
        }
 
        seq_printf(m,
-                  "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1x | ",
+                  "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %2lx %1x | ",
                   obj->debug_id,
                   obj->parent ? obj->parent->debug_id : -1,
-                  fscache_object_states_short[obj->state],
+                  obj->state->short_name,
                   obj->n_children,
                   obj->n_ops,
                   obj->n_obj_ops,
@@ -250,48 +246,40 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
                   obj->flags,
                   work_busy(&obj->work));
 
-       no_cookie = true;
-       keylen = auxlen = 0;
-       if (obj->cookie) {
-               spin_lock(&obj->lock);
-               if (obj->cookie) {
-                       switch (obj->cookie->def->type) {
-                       case 0:
-                               type = "IX";
-                               break;
-                       case 1:
-                               type = "DT";
-                               break;
-                       default:
-                               sprintf(_type, "%02u",
-                                       obj->cookie->def->type);
-                               type = _type;
-                               break;
-                       }
+       if (fscache_use_cookie(obj)) {
+               uint16_t keylen = 0, auxlen = 0;
 
-                       seq_printf(m, "%-16s %s %2lx %16p",
-                                  obj->cookie->def->name,
-                                  type,
-                                  obj->cookie->flags,
-                                  obj->cookie->netfs_data);
-
-                       if (obj->cookie->def->get_key &&
-                           config & FSCACHE_OBJLIST_CONFIG_KEY)
-                               keylen = obj->cookie->def->get_key(
-                                       obj->cookie->netfs_data,
-                                       buf, 400);
-
-                       if (obj->cookie->def->get_aux &&
-                           config & FSCACHE_OBJLIST_CONFIG_AUX)
-                               auxlen = obj->cookie->def->get_aux(
-                                       obj->cookie->netfs_data,
-                                       buf + keylen, 512 - keylen);
-
-                       no_cookie = false;
+               switch (cookie->def->type) {
+               case 0:
+                       type = "IX";
+                       break;
+               case 1:
+                       type = "DT";
+                       break;
+               default:
+                       sprintf(_type, "%02u", cookie->def->type);
+                       type = _type;
+                       break;
                }
-               spin_unlock(&obj->lock);
 
-               if (!no_cookie && (keylen > 0 || auxlen > 0)) {
+               seq_printf(m, "%-16s %s %2lx %16p",
+                          cookie->def->name,
+                          type,
+                          cookie->flags,
+                          cookie->netfs_data);
+
+               if (cookie->def->get_key &&
+                   config & FSCACHE_OBJLIST_CONFIG_KEY)
+                       keylen = cookie->def->get_key(cookie->netfs_data,
+                                                     buf, 400);
+
+               if (cookie->def->get_aux &&
+                   config & FSCACHE_OBJLIST_CONFIG_AUX)
+                       auxlen = cookie->def->get_aux(cookie->netfs_data,
+                                                     buf + keylen, 512 - keylen);
+               fscache_unuse_cookie(obj);
+
+               if (keylen > 0 || auxlen > 0) {
                        seq_printf(m, " ");
                        for (p = buf; keylen > 0; keylen--)
                                seq_printf(m, "%02x", *p++);
@@ -302,12 +290,11 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
                                        seq_printf(m, "%02x", *p++);
                        }
                }
-       }
 
-       if (no_cookie)
-               seq_printf(m, "<no_cookie>\n");
-       else
                seq_printf(m, "\n");
+       } else {
+               seq_printf(m, "<no_netfs>\n");
+       }
        return 0;
 }
 
index 50d41c1802110b472f93eae0cae5f775d332fd13..86d75a60b20c85543bd445869d9a57f37d42e920 100644 (file)
 #define FSCACHE_DEBUG_LEVEL COOKIE
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include "internal.h"
 
-const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
-       [FSCACHE_OBJECT_INIT]           = "OBJECT_INIT",
-       [FSCACHE_OBJECT_LOOKING_UP]     = "OBJECT_LOOKING_UP",
-       [FSCACHE_OBJECT_CREATING]       = "OBJECT_CREATING",
-       [FSCACHE_OBJECT_AVAILABLE]      = "OBJECT_AVAILABLE",
-       [FSCACHE_OBJECT_ACTIVE]         = "OBJECT_ACTIVE",
-       [FSCACHE_OBJECT_INVALIDATING]   = "OBJECT_INVALIDATING",
-       [FSCACHE_OBJECT_UPDATING]       = "OBJECT_UPDATING",
-       [FSCACHE_OBJECT_DYING]          = "OBJECT_DYING",
-       [FSCACHE_OBJECT_LC_DYING]       = "OBJECT_LC_DYING",
-       [FSCACHE_OBJECT_ABORT_INIT]     = "OBJECT_ABORT_INIT",
-       [FSCACHE_OBJECT_RELEASING]      = "OBJECT_RELEASING",
-       [FSCACHE_OBJECT_RECYCLING]      = "OBJECT_RECYCLING",
-       [FSCACHE_OBJECT_WITHDRAWING]    = "OBJECT_WITHDRAWING",
-       [FSCACHE_OBJECT_DEAD]           = "OBJECT_DEAD",
+static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
+static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
+static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
+static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
+static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
+static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
+static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+
+#define __STATE_NAME(n) fscache_osm_##n
+#define STATE(n) (&__STATE_NAME(n))
+
+/*
+ * Define a work state.  Work states are execution states.  No event processing
+ * is performed by them.  The function attached to a work state returns a
+ * pointer indicating the next state to which the state machine should
+ * transition.  Returning NO_TRANSIT repeats the current state, but goes back
+ * to the scheduler first.
+ */
+#define WORK_STATE(n, sn, f) \
+       const struct fscache_state __STATE_NAME(n) = {                  \
+               .name = #n,                                             \
+               .short_name = sn,                                       \
+               .work = f                                               \
+       }
+
+/*
+ * Returns from work states.
+ */
+#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })
+
+#define NO_TRANSIT ((struct fscache_state *)NULL)
+
+/*
+ * Define a wait state.  Wait states are event processing states.  No execution
+ * is performed by them.  Wait states are just tables of "if event X occurs,
+ * clear it and transition to state Y".  The dispatcher returns to the
+ * scheduler if none of the events in which the wait state has an interest are
+ * currently pending.
+ */
+#define WAIT_STATE(n, sn, ...) \
+       const struct fscache_state __STATE_NAME(n) = {                  \
+               .name = #n,                                             \
+               .short_name = sn,                                       \
+               .work = NULL,                                           \
+               .transitions = { __VA_ARGS__, { 0, NULL } }             \
+       }
+
+#define TRANSIT_TO(state, emask) \
+       { .events = (emask), .transit_to = STATE(state) }
+
+/*
+ * The object state machine.
+ */
+static WORK_STATE(INIT_OBJECT,         "INIT", fscache_initialise_object);
+static WORK_STATE(PARENT_READY,                "PRDY", fscache_parent_ready);
+static WORK_STATE(ABORT_INIT,          "ABRT", fscache_abort_initialisation);
+static WORK_STATE(LOOK_UP_OBJECT,      "LOOK", fscache_look_up_object);
+static WORK_STATE(CREATE_OBJECT,       "CRTO", fscache_look_up_object);
+static WORK_STATE(OBJECT_AVAILABLE,    "AVBL", fscache_object_available);
+static WORK_STATE(JUMPSTART_DEPS,      "JUMP", fscache_jumpstart_dependents);
+
+static WORK_STATE(INVALIDATE_OBJECT,   "INVL", fscache_invalidate_object);
+static WORK_STATE(UPDATE_OBJECT,       "UPDT", fscache_update_object);
+
+static WORK_STATE(LOOKUP_FAILURE,      "LCFL", fscache_lookup_failure);
+static WORK_STATE(KILL_OBJECT,         "KILL", fscache_kill_object);
+static WORK_STATE(KILL_DEPENDENTS,     "KDEP", fscache_kill_dependents);
+static WORK_STATE(DROP_OBJECT,         "DROP", fscache_drop_object);
+static WORK_STATE(OBJECT_DEAD,         "DEAD", (void*)2UL);
+
+static WAIT_STATE(WAIT_FOR_INIT,       "?INI",
+                 TRANSIT_TO(INIT_OBJECT,       1 << FSCACHE_OBJECT_EV_NEW_CHILD));
+
+static WAIT_STATE(WAIT_FOR_PARENT,     "?PRN",
+                 TRANSIT_TO(PARENT_READY,      1 << FSCACHE_OBJECT_EV_PARENT_READY));
+
+static WAIT_STATE(WAIT_FOR_CMD,                "?CMD",
+                 TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
+                 TRANSIT_TO(UPDATE_OBJECT,     1 << FSCACHE_OBJECT_EV_UPDATE),
+                 TRANSIT_TO(JUMPSTART_DEPS,    1 << FSCACHE_OBJECT_EV_NEW_CHILD));
+
+static WAIT_STATE(WAIT_FOR_CLEARANCE,  "?CLR",
+                 TRANSIT_TO(KILL_OBJECT,       1 << FSCACHE_OBJECT_EV_CLEARED));
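
Taken together, WORK_STATE() and WAIT_STATE() make the machine extensible declaratively: a work state is a named function that returns the next state, and a wait state is a table routing events to work states. Purely as a hypothetical illustration of the macros -- nothing below is added by this series:

    /* Hypothetical extra state, shown only to illustrate the macros above. */
    static const struct fscache_state *fscache_demo_flush(struct fscache_object *object,
                                                          int event)
    {
            /* ... perform the work for this state ... */
            return transit_to(WAIT_FOR_CMD);        /* choose the next state */
    }

    static WORK_STATE(DEMO_FLUSH,    "FLSH", fscache_demo_flush);

    static WAIT_STATE(WAIT_FOR_DEMO, "?DMO",
                      TRANSIT_TO(DEMO_FLUSH, 1 << FSCACHE_OBJECT_EV_UPDATE));
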
+
+/*
+ * Out-of-band event transition tables.  These are for handling unexpected
+ * events, such as an I/O error.  If an OOB event occurs, the state machine
+ * clears and disables the event and forces a transition to the nominated work
+ * state (a currently executing work state will complete first).
+ *
+ * In such a situation, object->state remembers the state the machine should
+ * have been in/gone to and returning NO_TRANSIT returns to that.
+ */
+static const struct fscache_transition fscache_osm_init_oob[] = {
+          TRANSIT_TO(ABORT_INIT,
+                     (1 << FSCACHE_OBJECT_EV_ERROR) |
+                     (1 << FSCACHE_OBJECT_EV_KILL)),
+          { 0, NULL }
+};
+
+static const struct fscache_transition fscache_osm_lookup_oob[] = {
+          TRANSIT_TO(LOOKUP_FAILURE,
+                     (1 << FSCACHE_OBJECT_EV_ERROR) |
+                     (1 << FSCACHE_OBJECT_EV_KILL)),
+          { 0, NULL }
 };
-EXPORT_SYMBOL(fscache_object_states);
-
-const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
-       [FSCACHE_OBJECT_INIT]           = "INIT",
-       [FSCACHE_OBJECT_LOOKING_UP]     = "LOOK",
-       [FSCACHE_OBJECT_CREATING]       = "CRTN",
-       [FSCACHE_OBJECT_AVAILABLE]      = "AVBL",
-       [FSCACHE_OBJECT_ACTIVE]         = "ACTV",
-       [FSCACHE_OBJECT_INVALIDATING]   = "INVL",
-       [FSCACHE_OBJECT_UPDATING]       = "UPDT",
-       [FSCACHE_OBJECT_DYING]          = "DYNG",
-       [FSCACHE_OBJECT_LC_DYING]       = "LCDY",
-       [FSCACHE_OBJECT_ABORT_INIT]     = "ABTI",
-       [FSCACHE_OBJECT_RELEASING]      = "RELS",
-       [FSCACHE_OBJECT_RECYCLING]      = "RCYC",
-       [FSCACHE_OBJECT_WITHDRAWING]    = "WTHD",
-       [FSCACHE_OBJECT_DEAD]           = "DEAD",
+
+static const struct fscache_transition fscache_osm_run_oob[] = {
+          TRANSIT_TO(KILL_OBJECT,
+                     (1 << FSCACHE_OBJECT_EV_ERROR) |
+                     (1 << FSCACHE_OBJECT_EV_KILL)),
+          { 0, NULL }
 };
 
 static int  fscache_get_object(struct fscache_object *);
 static void fscache_put_object(struct fscache_object *);
-static void fscache_initialise_object(struct fscache_object *);
-static void fscache_lookup_object(struct fscache_object *);
-static void fscache_object_available(struct fscache_object *);
-static void fscache_invalidate_object(struct fscache_object *);
-static void fscache_release_object(struct fscache_object *);
-static void fscache_withdraw_object(struct fscache_object *);
-static void fscache_enqueue_dependents(struct fscache_object *);
+static bool fscache_enqueue_dependents(struct fscache_object *, int);
 static void fscache_dequeue_object(struct fscache_object *);
 
 /*
@@ -75,295 +154,116 @@ static inline void fscache_done_parent_op(struct fscache_object *object)
               object->debug_id, parent->debug_id, parent->n_ops);
 
        spin_lock_nested(&parent->lock, 1);
-       parent->n_ops--;
        parent->n_obj_ops--;
+       parent->n_ops--;
        if (parent->n_ops == 0)
                fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
        spin_unlock(&parent->lock);
 }
 
 /*
- * Notify netfs of invalidation completion.
+ * Object state machine dispatcher.
  */
-static inline void fscache_invalidation_complete(struct fscache_cookie *cookie)
+static void fscache_object_sm_dispatcher(struct fscache_object *object)
 {
-       if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
-               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
-}
-
-/*
- * process events that have been sent to an object's state machine
- * - initiates parent lookup
- * - does object lookup
- * - does object creation
- * - does object recycling and retirement
- * - does object withdrawal
- */
-static void fscache_object_state_machine(struct fscache_object *object)
-{
-       enum fscache_object_state new_state;
-       struct fscache_cookie *cookie;
-       int event;
+       const struct fscache_transition *t;
+       const struct fscache_state *state, *new_state;
+       unsigned long events, event_mask;
+       int event = -1;
 
        ASSERT(object != NULL);
 
        _enter("{OBJ%x,%s,%lx}",
-              object->debug_id, fscache_object_states[object->state],
-              object->events);
-
-       switch (object->state) {
-               /* wait for the parent object to become ready */
-       case FSCACHE_OBJECT_INIT:
-               object->event_mask =
-                       FSCACHE_OBJECT_EVENTS_MASK &
-                       ~(1 << FSCACHE_OBJECT_EV_CLEARED);
-               fscache_initialise_object(object);
-               goto done;
-
-               /* look up the object metadata on disk */
-       case FSCACHE_OBJECT_LOOKING_UP:
-               fscache_lookup_object(object);
-               goto lookup_transit;
-
-               /* create the object metadata on disk */
-       case FSCACHE_OBJECT_CREATING:
-               fscache_lookup_object(object);
-               goto lookup_transit;
-
-               /* handle an object becoming available; start pending
-                * operations and queue dependent operations for processing */
-       case FSCACHE_OBJECT_AVAILABLE:
-               fscache_object_available(object);
-               goto active_transit;
-
-               /* normal running state */
-       case FSCACHE_OBJECT_ACTIVE:
-               goto active_transit;
-
-               /* Invalidate an object on disk */
-       case FSCACHE_OBJECT_INVALIDATING:
-               clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
-               fscache_stat(&fscache_n_invalidates_run);
-               fscache_stat(&fscache_n_cop_invalidate_object);
-               fscache_invalidate_object(object);
-               fscache_stat_d(&fscache_n_cop_invalidate_object);
-               fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
-               goto active_transit;
-
-               /* update the object metadata on disk */
-       case FSCACHE_OBJECT_UPDATING:
-               clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
-               fscache_stat(&fscache_n_updates_run);
-               fscache_stat(&fscache_n_cop_update_object);
-               object->cache->ops->update_object(object);
-               fscache_stat_d(&fscache_n_cop_update_object);
-               goto active_transit;
-
-               /* handle an object dying during lookup or creation */
-       case FSCACHE_OBJECT_LC_DYING:
-               object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
-               fscache_stat(&fscache_n_cop_lookup_complete);
-               object->cache->ops->lookup_complete(object);
-               fscache_stat_d(&fscache_n_cop_lookup_complete);
-
-               spin_lock(&object->lock);
-               object->state = FSCACHE_OBJECT_DYING;
-               cookie = object->cookie;
-               if (cookie) {
-                       if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP,
-                                              &cookie->flags))
-                               wake_up_bit(&cookie->flags,
-                                           FSCACHE_COOKIE_LOOKING_UP);
-                       if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
-                                              &cookie->flags))
-                               wake_up_bit(&cookie->flags,
-                                           FSCACHE_COOKIE_CREATING);
+              object->debug_id, object->state->name, object->events);
+
+       event_mask = object->event_mask;
+restart:
+       object->event_mask = 0; /* Mask normal event handling */
+       state = object->state;
+restart_masked:
+       events = object->events;
+
+       /* Handle any out-of-band events (typically an error) */
+       if (events & object->oob_event_mask) {
+               _debug("{OBJ%x} oob %lx",
+                      object->debug_id, events & object->oob_event_mask);
+               for (t = object->oob_table; t->events; t++) {
+                       if (events & t->events) {
+                               state = t->transit_to;
+                               ASSERT(state->work != NULL);
+                               event = fls(events & t->events) - 1;
+                               __clear_bit(event, &object->oob_event_mask);
+                               clear_bit(event, &object->events);
+                               goto execute_work_state;
+                       }
                }
-               spin_unlock(&object->lock);
+       }
 
-               fscache_done_parent_op(object);
+       /* Wait states are just transition tables */
+       if (!state->work) {
+               if (events & event_mask) {
+                       for (t = state->transitions; t->events; t++) {
+                               if (events & t->events) {
+                                       new_state = t->transit_to;
+                                       event = fls(events & t->events) - 1;
+                                       clear_bit(event, &object->events);
+                                       _debug("{OBJ%x} ev %d: %s -> %s",
+                                              object->debug_id, event,
+                                              state->name, new_state->name);
+                                       object->state = state = new_state;
+                                       goto execute_work_state;
+                               }
+                       }
 
-               /* wait for completion of all active operations on this object
-                * and the death of all child objects of this object */
-       case FSCACHE_OBJECT_DYING:
-       dying:
-               clear_bit(FSCACHE_OBJECT_EV_CLEARED, &object->events);
-               spin_lock(&object->lock);
-               _debug("dying OBJ%x {%d,%d}",
-                      object->debug_id, object->n_ops, object->n_children);
-               if (object->n_ops == 0 && object->n_children == 0) {
-                       object->event_mask &=
-                               ~(1 << FSCACHE_OBJECT_EV_CLEARED);
-                       object->event_mask |=
-                               (1 << FSCACHE_OBJECT_EV_WITHDRAW) |
-                               (1 << FSCACHE_OBJECT_EV_RETIRE) |
-                               (1 << FSCACHE_OBJECT_EV_RELEASE) |
-                               (1 << FSCACHE_OBJECT_EV_ERROR);
-               } else {
-                       object->event_mask &=
-                               ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) |
-                                 (1 << FSCACHE_OBJECT_EV_RETIRE) |
-                                 (1 << FSCACHE_OBJECT_EV_RELEASE) |
-                                 (1 << FSCACHE_OBJECT_EV_ERROR));
-                       object->event_mask |=
-                               1 << FSCACHE_OBJECT_EV_CLEARED;
+                       /* The event mask didn't include all the tabled bits */
+                       BUG();
                }
-               spin_unlock(&object->lock);
-               fscache_enqueue_dependents(object);
-               fscache_start_operations(object);
-               goto terminal_transit;
-
-               /* handle an abort during initialisation */
-       case FSCACHE_OBJECT_ABORT_INIT:
-               _debug("handle abort init %lx", object->events);
-               object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
-
-               spin_lock(&object->lock);
-               fscache_dequeue_object(object);
-
-               object->state = FSCACHE_OBJECT_DYING;
-               if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
-                                      &object->cookie->flags))
-                       wake_up_bit(&object->cookie->flags,
-                                   FSCACHE_COOKIE_CREATING);
-               spin_unlock(&object->lock);
-               goto dying;
-
-               /* handle the netfs releasing an object and possibly marking it
-                * obsolete too */
-       case FSCACHE_OBJECT_RELEASING:
-       case FSCACHE_OBJECT_RECYCLING:
-               object->event_mask &=
-                       ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) |
-                         (1 << FSCACHE_OBJECT_EV_RETIRE) |
-                         (1 << FSCACHE_OBJECT_EV_RELEASE) |
-                         (1 << FSCACHE_OBJECT_EV_ERROR));
-               fscache_release_object(object);
-               spin_lock(&object->lock);
-               object->state = FSCACHE_OBJECT_DEAD;
-               spin_unlock(&object->lock);
-               fscache_stat(&fscache_n_object_dead);
-               goto terminal_transit;
-
-               /* handle the parent cache of this object being withdrawn from
-                * active service */
-       case FSCACHE_OBJECT_WITHDRAWING:
-               object->event_mask &=
-                       ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) |
-                         (1 << FSCACHE_OBJECT_EV_RETIRE) |
-                         (1 << FSCACHE_OBJECT_EV_RELEASE) |
-                         (1 << FSCACHE_OBJECT_EV_ERROR));
-               fscache_withdraw_object(object);
-               spin_lock(&object->lock);
-               object->state = FSCACHE_OBJECT_DEAD;
-               spin_unlock(&object->lock);
-               fscache_stat(&fscache_n_object_dead);
-               goto terminal_transit;
-
-               /* complain about the object being woken up once it is
-                * deceased */
-       case FSCACHE_OBJECT_DEAD:
-               printk(KERN_ERR "FS-Cache:"
-                      " Unexpected event in dead state %lx\n",
-                      object->events & object->event_mask);
-               BUG();
-
-       default:
-               printk(KERN_ERR "FS-Cache: Unknown object state %u\n",
-                      object->state);
-               BUG();
-       }
-
-       /* determine the transition from a lookup state */
-lookup_transit:
-       event = fls(object->events & object->event_mask) - 1;
-       switch (event) {
-       case FSCACHE_OBJECT_EV_WITHDRAW:
-       case FSCACHE_OBJECT_EV_RETIRE:
-       case FSCACHE_OBJECT_EV_RELEASE:
-       case FSCACHE_OBJECT_EV_ERROR:
-               new_state = FSCACHE_OBJECT_LC_DYING;
-               goto change_state;
-       case FSCACHE_OBJECT_EV_INVALIDATE:
-               new_state = FSCACHE_OBJECT_INVALIDATING;
-               goto change_state;
-       case FSCACHE_OBJECT_EV_REQUEUE:
-               goto done;
-       case -1:
-               goto done; /* sleep until event */
-       default:
-               goto unsupported_event;
+               /* Randomly woke up */
+               goto unmask_events;
        }
 
-       /* determine the transition from an active state */
-active_transit:
-       event = fls(object->events & object->event_mask) - 1;
-       switch (event) {
-       case FSCACHE_OBJECT_EV_WITHDRAW:
-       case FSCACHE_OBJECT_EV_RETIRE:
-       case FSCACHE_OBJECT_EV_RELEASE:
-       case FSCACHE_OBJECT_EV_ERROR:
-               new_state = FSCACHE_OBJECT_DYING;
-               goto change_state;
-       case FSCACHE_OBJECT_EV_INVALIDATE:
-               new_state = FSCACHE_OBJECT_INVALIDATING;
-               goto change_state;
-       case FSCACHE_OBJECT_EV_UPDATE:
-               new_state = FSCACHE_OBJECT_UPDATING;
-               goto change_state;
-       case -1:
-               new_state = FSCACHE_OBJECT_ACTIVE;
-               goto change_state; /* sleep until event */
-       default:
-               goto unsupported_event;
-       }
+execute_work_state:
+       _debug("{OBJ%x} exec %s", object->debug_id, state->name);
 
-       /* determine the transition from a terminal state */
-terminal_transit:
-       event = fls(object->events & object->event_mask) - 1;
-       switch (event) {
-       case FSCACHE_OBJECT_EV_WITHDRAW:
-               new_state = FSCACHE_OBJECT_WITHDRAWING;
-               goto change_state;
-       case FSCACHE_OBJECT_EV_RETIRE:
-               new_state = FSCACHE_OBJECT_RECYCLING;
-               goto change_state;
-       case FSCACHE_OBJECT_EV_RELEASE:
-               new_state = FSCACHE_OBJECT_RELEASING;
-               goto change_state;
-       case FSCACHE_OBJECT_EV_ERROR:
-               new_state = FSCACHE_OBJECT_WITHDRAWING;
-               goto change_state;
-       case FSCACHE_OBJECT_EV_CLEARED:
-               new_state = FSCACHE_OBJECT_DYING;
-               goto change_state;
-       case -1:
-               goto done; /* sleep until event */
-       default:
-               goto unsupported_event;
+       new_state = state->work(object, event);
+       event = -1;
+       if (new_state == NO_TRANSIT) {
+               _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+               fscache_enqueue_object(object);
+               event_mask = object->oob_event_mask;
+               goto unmask_events;
        }
 
-change_state:
-       spin_lock(&object->lock);
-       object->state = new_state;
-       spin_unlock(&object->lock);
+       _debug("{OBJ%x} %s -> %s",
+              object->debug_id, state->name, new_state->name);
+       object->state = state = new_state;
 
-done:
-       _leave(" [->%s]", fscache_object_states[object->state]);
-       return;
+       if (state->work) {
+               if (unlikely(state->work == ((void *)2UL))) {
+                       _leave(" [dead]");
+                       return;
+               }
+               goto restart_masked;
+       }
 
-unsupported_event:
-       printk(KERN_ERR "FS-Cache:"
-              " Unsupported event %d [%lx/%lx] in state %s\n",
-              event, object->events, object->event_mask,
-              fscache_object_states[object->state]);
-       BUG();
+       /* Transited to wait state */
+       event_mask = object->oob_event_mask;
+       for (t = state->transitions; t->events; t++)
+               event_mask |= t->events;
+
+unmask_events:
+       object->event_mask = event_mask;
+       smp_mb();
+       events = object->events;
+       if (events & event_mask)
+               goto restart;
+       _leave(" [msk %lx]", event_mask);
 }
 
 /*
  * execute an object
  */
-void fscache_object_work_func(struct work_struct *work)
+static void fscache_object_work_func(struct work_struct *work)
 {
        struct fscache_object *object =
                container_of(work, struct fscache_object, work);
@@ -372,14 +272,70 @@ void fscache_object_work_func(struct work_struct *work)
        _enter("{OBJ%x}", object->debug_id);
 
        start = jiffies;
-       fscache_object_state_machine(object);
+       fscache_object_sm_dispatcher(object);
        fscache_hist(fscache_objs_histogram, start);
-       if (object->events & object->event_mask)
-               fscache_enqueue_object(object);
-       clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
        fscache_put_object(object);
 }
-EXPORT_SYMBOL(fscache_object_work_func);
+
+/**
+ * fscache_object_init - Initialise a cache object description
+ * @object: Object description
+ * @cookie: Cookie object will be attached to
+ * @cache: Cache in which backing object will be found
+ *
+ * Initialise a cache object description to its basic values.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+void fscache_object_init(struct fscache_object *object,
+                        struct fscache_cookie *cookie,
+                        struct fscache_cache *cache)
+{
+       const struct fscache_transition *t;
+
+       atomic_inc(&cache->object_count);
+
+       object->state = STATE(WAIT_FOR_INIT);
+       object->oob_table = fscache_osm_init_oob;
+       object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
+       spin_lock_init(&object->lock);
+       INIT_LIST_HEAD(&object->cache_link);
+       INIT_HLIST_NODE(&object->cookie_link);
+       INIT_WORK(&object->work, fscache_object_work_func);
+       INIT_LIST_HEAD(&object->dependents);
+       INIT_LIST_HEAD(&object->dep_link);
+       INIT_LIST_HEAD(&object->pending_ops);
+       object->n_children = 0;
+       object->n_ops = object->n_in_progress = object->n_exclusive = 0;
+       object->events = 0;
+       object->store_limit = 0;
+       object->store_limit_l = 0;
+       object->cache = cache;
+       object->cookie = cookie;
+       object->parent = NULL;
+
+       object->oob_event_mask = 0;
+       for (t = object->oob_table; t->events; t++)
+               object->oob_event_mask |= t->events;
+       object->event_mask = object->oob_event_mask;
+       for (t = object->state->transitions; t->events; t++)
+               object->event_mask |= t->events;
+}
+EXPORT_SYMBOL(fscache_object_init);
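
fscache_object_init() is what a cache backend calls from its ->alloc_object() cache operation, typically on an object structure that embeds struct fscache_object. A hypothetical backend would use it roughly as follows (names invented, error handling trimmed; modelled loosely on how a backend such as cachefiles works, not code from this merge):

    struct demo_backend_object {
            struct fscache_object   fscache;        /* generic part, initialised below */
            /* ... backend-private state ... */
    };

    static struct fscache_object *demo_alloc_object(struct fscache_cache *cache,
                                                    struct fscache_cookie *cookie)
    {
            struct demo_backend_object *obj;

            obj = kzalloc(sizeof(*obj), GFP_KERNEL);
            if (!obj)
                    return NULL;

            /* Leaves the object in WAIT_FOR_INIT with its event mask derived
             * from the state tables above; the core then drives it by raising
             * FSCACHE_OBJECT_EV_NEW_CHILD. */
            fscache_object_init(&obj->fscache, cookie, cache);
            return &obj->fscache;
    }
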
+
+/*
+ * Abort object initialisation before we start it.
+ */
+static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
+                                                               int event)
+{
+       _enter("{OBJ%x},%d", object->debug_id, event);
+
+       object->oob_event_mask = 0;
+       fscache_dequeue_object(object);
+       return transit_to(KILL_OBJECT);
+}
 
 /*
  * initialise an object
@@ -387,130 +343,136 @@ EXPORT_SYMBOL(fscache_object_work_func);
  *   immediately to do a creation
  * - we may need to start the process of creating a parent and we need to wait
  *   for the parent's lookup and creation to complete if it's not there yet
- * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the
- *   leaf-most cookies of the object and all its children
  */
-static void fscache_initialise_object(struct fscache_object *object)
+static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
+                                                            int event)
 {
        struct fscache_object *parent;
+       bool success;
 
-       _enter("");
-       ASSERT(object->cookie != NULL);
-       ASSERT(object->cookie->parent != NULL);
-
-       if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) |
-                             (1 << FSCACHE_OBJECT_EV_RELEASE) |
-                             (1 << FSCACHE_OBJECT_EV_RETIRE) |
-                             (1 << FSCACHE_OBJECT_EV_WITHDRAW))) {
-               _debug("abort init %lx", object->events);
-               spin_lock(&object->lock);
-               object->state = FSCACHE_OBJECT_ABORT_INIT;
-               spin_unlock(&object->lock);
-               return;
-       }
+       _enter("{OBJ%x},%d", object->debug_id, event);
 
-       spin_lock(&object->cookie->lock);
-       spin_lock_nested(&object->cookie->parent->lock, 1);
+       ASSERT(list_empty(&object->dep_link));
 
        parent = object->parent;
        if (!parent) {
-               _debug("no parent");
-               set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
-       } else {
-               spin_lock(&object->lock);
-               spin_lock_nested(&parent->lock, 1);
-               _debug("parent %s", fscache_object_states[parent->state]);
-
-               if (parent->state >= FSCACHE_OBJECT_DYING) {
-                       _debug("bad parent");
-                       set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
-               } else if (parent->state < FSCACHE_OBJECT_AVAILABLE) {
-                       _debug("wait");
-
-                       /* we may get woken up in this state by child objects
-                        * binding on to us, so we need to make sure we don't
-                        * add ourself to the list multiple times */
-                       if (list_empty(&object->dep_link)) {
-                               fscache_stat(&fscache_n_cop_grab_object);
-                               object->cache->ops->grab_object(object);
-                               fscache_stat_d(&fscache_n_cop_grab_object);
-                               list_add(&object->dep_link,
-                                        &parent->dependents);
-
-                               /* fscache_acquire_non_index_cookie() uses this
-                                * to wake the chain up */
-                               if (parent->state == FSCACHE_OBJECT_INIT)
-                                       fscache_enqueue_object(parent);
-                       }
-               } else {
-                       _debug("go");
-                       parent->n_ops++;
-                       parent->n_obj_ops++;
-                       object->lookup_jif = jiffies;
-                       object->state = FSCACHE_OBJECT_LOOKING_UP;
-                       set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
-               }
+               _leave(" [no parent]");
+               return transit_to(DROP_OBJECT);
+       }
 
-               spin_unlock(&parent->lock);
-               spin_unlock(&object->lock);
+       _debug("parent: %s of:%lx", parent->state->name, parent->flags);
+
+       if (fscache_object_is_dying(parent)) {
+               _leave(" [bad parent]");
+               return transit_to(DROP_OBJECT);
        }
 
-       spin_unlock(&object->cookie->parent->lock);
-       spin_unlock(&object->cookie->lock);
+       if (fscache_object_is_available(parent)) {
+               _leave(" [ready]");
+               return transit_to(PARENT_READY);
+       }
+
+       _debug("wait");
+
+       spin_lock(&parent->lock);
+       fscache_stat(&fscache_n_cop_grab_object);
+       success = false;
+       if (fscache_object_is_live(parent) &&
+           object->cache->ops->grab_object(object)) {
+               list_add(&object->dep_link, &parent->dependents);
+               success = true;
+       }
+       fscache_stat_d(&fscache_n_cop_grab_object);
+       spin_unlock(&parent->lock);
+       if (!success) {
+               _leave(" [grab failed]");
+               return transit_to(DROP_OBJECT);
+       }
+
+       /* fscache_acquire_non_index_cookie() uses this
+        * to wake the chain up */
+       fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
+       _leave(" [wait]");
+       return transit_to(WAIT_FOR_PARENT);
+}
+
+/*
+ * Once the parent object is ready, we should kick off our lookup op.
+ */
+static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
+                                                       int event)
+{
+       struct fscache_object *parent = object->parent;
+
+       _enter("{OBJ%x},%d", object->debug_id, event);
+
+       ASSERT(parent != NULL);
+
+       spin_lock(&parent->lock);
+       parent->n_ops++;
+       parent->n_obj_ops++;
+       object->lookup_jif = jiffies;
+       spin_unlock(&parent->lock);
+
        _leave("");
+       return transit_to(LOOK_UP_OBJECT);
 }
 
 /*
  * look an object up in the cache from which it was allocated
  * - we hold an "access lock" on the parent object, so the parent object cannot
  *   be withdrawn by either party till we've finished
- * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the
- *   leaf-most cookies of the object and all its children
  */
-static void fscache_lookup_object(struct fscache_object *object)
+static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
+                                                         int event)
 {
        struct fscache_cookie *cookie = object->cookie;
-       struct fscache_object *parent;
+       struct fscache_object *parent = object->parent;
        int ret;
 
-       _enter("");
+       _enter("{OBJ%x},%d", object->debug_id, event);
+
+       object->oob_table = fscache_osm_lookup_oob;
 
-       parent = object->parent;
        ASSERT(parent != NULL);
        ASSERTCMP(parent->n_ops, >, 0);
        ASSERTCMP(parent->n_obj_ops, >, 0);
 
        /* make sure the parent is still available */
-       ASSERTCMP(parent->state, >=, FSCACHE_OBJECT_AVAILABLE);
-
-       if (parent->state >= FSCACHE_OBJECT_DYING ||
-           test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
-               _debug("unavailable");
-               set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
-               _leave("");
-               return;
+       ASSERT(fscache_object_is_available(parent));
+
+       if (fscache_object_is_dying(parent) ||
+           test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
+           !fscache_use_cookie(object)) {
+               _leave(" [unavailable]");
+               return transit_to(LOOKUP_FAILURE);
        }
 
-       _debug("LOOKUP \"%s/%s\" in \"%s\"",
-              parent->cookie->def->name, cookie->def->name,
-              object->cache->tag->name);
+       _debug("LOOKUP \"%s\" in \"%s\"",
+              cookie->def->name, object->cache->tag->name);
 
        fscache_stat(&fscache_n_object_lookups);
        fscache_stat(&fscache_n_cop_lookup_object);
        ret = object->cache->ops->lookup_object(object);
        fscache_stat_d(&fscache_n_cop_lookup_object);
 
-       if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
-               set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
+       fscache_unuse_cookie(object);
 
        if (ret == -ETIMEDOUT) {
                /* probably stuck behind another object, so move this one to
                 * the back of the queue */
                fscache_stat(&fscache_n_object_lookups_timed_out);
-               set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+               _leave(" [timeout]");
+               return NO_TRANSIT;
        }
 
-       _leave("");
+       if (ret < 0) {
+               _leave(" [error]");
+               return transit_to(LOOKUP_FAILURE);
+       }
+
+       _leave(" [ok]");
+       return transit_to(OBJECT_AVAILABLE);
 }
 
 /**
@@ -524,32 +486,20 @@ void fscache_object_lookup_negative(struct fscache_object *object)
 {
        struct fscache_cookie *cookie = object->cookie;
 
-       _enter("{OBJ%x,%s}",
-              object->debug_id, fscache_object_states[object->state]);
+       _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
 
-       spin_lock(&object->lock);
-       if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+       if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                fscache_stat(&fscache_n_object_lookups_negative);
 
-               /* transit here to allow write requests to begin stacking up
-                * and read requests to begin returning ENODATA */
-               object->state = FSCACHE_OBJECT_CREATING;
-               spin_unlock(&object->lock);
-
-               set_bit(FSCACHE_COOKIE_PENDING_FILL, &cookie->flags);
+               /* Allow write requests to begin stacking up and read requests to begin
+                * returning ENODATA.
+                */
                set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
 
                _debug("wake up lookup %p", &cookie->flags);
-               smp_mb__before_clear_bit();
-               clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
-               smp_mb__after_clear_bit();
+               clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
-               set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
-       } else {
-               ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
-               spin_unlock(&object->lock);
        }
-
        _leave("");
 }
 EXPORT_SYMBOL(fscache_object_lookup_negative);
@@ -568,38 +518,26 @@ void fscache_obtained_object(struct fscache_object *object)
 {
        struct fscache_cookie *cookie = object->cookie;
 
-       _enter("{OBJ%x,%s}",
-              object->debug_id, fscache_object_states[object->state]);
+       _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
 
        /* if we were still looking up, then we must have a positive lookup
         * result, in which case there may be data available */
-       spin_lock(&object->lock);
-       if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+       if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                fscache_stat(&fscache_n_object_lookups_positive);
 
-               clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+               /* We do (presumably) have data */
+               clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
 
-               object->state = FSCACHE_OBJECT_AVAILABLE;
-               spin_unlock(&object->lock);
-
-               smp_mb__before_clear_bit();
-               clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
-               smp_mb__after_clear_bit();
+               /* Allow write requests to begin stacking up and read requests
+                * to begin shovelling data.
+                */
+               clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
-               set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
        } else {
-               ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
                fscache_stat(&fscache_n_object_created);
-
-               object->state = FSCACHE_OBJECT_AVAILABLE;
-               spin_unlock(&object->lock);
-               set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
-               smp_wmb();
        }
 
-       if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &cookie->flags))
-               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_CREATING);
-
+       set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
        _leave("");
 }
 EXPORT_SYMBOL(fscache_obtained_object);
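
For context, fscache_object_lookup_negative() and fscache_obtained_object() above are the two notifications a cache backend issues from its lookup path. A hedged sketch of a backend ->lookup_object() using them; my_backing_lookup() and my_backing_create() are invented stand-ins for the backend's on-disk probe and create steps.

	static int my_lookup_object(struct fscache_object *object)
	{
		int ret;

		ret = my_backing_lookup(object);	/* hypothetical on-disk probe */
		if (ret == -ENOENT) {
			/* Nothing cached yet: reads will see ENODATA, writes queue up */
			fscache_object_lookup_negative(object);
			ret = my_backing_create(object);	/* hypothetical */
		}
		if (ret < 0)
			return ret;

		/* The backing object now exists and may already hold data */
		fscache_obtained_object(object);
		return 0;
	}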
@@ -607,15 +545,14 @@ EXPORT_SYMBOL(fscache_obtained_object);
 /*
  * handle an object that has just become available
  */
-static void fscache_object_available(struct fscache_object *object)
+static const struct fscache_state *fscache_object_available(struct fscache_object *object,
+                                                           int event)
 {
-       _enter("{OBJ%x}", object->debug_id);
+       _enter("{OBJ%x},%d", object->debug_id, event);
 
-       spin_lock(&object->lock);
+       object->oob_table = fscache_osm_run_oob;
 
-       if (object->cookie &&
-           test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
-               wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
+       spin_lock(&object->lock);
 
        fscache_done_parent_op(object);
        if (object->n_in_progress == 0) {
@@ -631,130 +568,158 @@ static void fscache_object_available(struct fscache_object *object)
        fscache_stat(&fscache_n_cop_lookup_complete);
        object->cache->ops->lookup_complete(object);
        fscache_stat_d(&fscache_n_cop_lookup_complete);
-       fscache_enqueue_dependents(object);
 
        fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
        fscache_stat(&fscache_n_object_avail);
 
        _leave("");
+       return transit_to(JUMPSTART_DEPS);
 }
 
 /*
- * drop an object's attachments
+ * Wake up this object's dependent objects now that we've become available.
  */
-static void fscache_drop_object(struct fscache_object *object)
+static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
+                                                               int event)
 {
-       struct fscache_object *parent = object->parent;
-       struct fscache_cache *cache = object->cache;
+       _enter("{OBJ%x},%d", object->debug_id, event);
 
-       _enter("{OBJ%x,%d}", object->debug_id, object->n_children);
+       if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
+               return NO_TRANSIT; /* Not finished; requeue */
+       return transit_to(WAIT_FOR_CMD);
+}
 
-       ASSERTCMP(object->cookie, ==, NULL);
-       ASSERT(hlist_unhashed(&object->cookie_link));
+/*
+ * Handle lookup or creation failure.
+ */
+static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
+                                                         int event)
+{
+       struct fscache_cookie *cookie;
 
-       spin_lock(&cache->object_list_lock);
-       list_del_init(&object->cache_link);
-       spin_unlock(&cache->object_list_lock);
+       _enter("{OBJ%x},%d", object->debug_id, event);
 
-       fscache_stat(&fscache_n_cop_drop_object);
-       cache->ops->drop_object(object);
-       fscache_stat_d(&fscache_n_cop_drop_object);
+       object->oob_event_mask = 0;
 
-       if (parent) {
-               _debug("release parent OBJ%x {%d}",
-                      parent->debug_id, parent->n_children);
+       fscache_stat(&fscache_n_cop_lookup_complete);
+       object->cache->ops->lookup_complete(object);
+       fscache_stat_d(&fscache_n_cop_lookup_complete);
 
-               spin_lock(&parent->lock);
-               parent->n_children--;
-               if (parent->n_children == 0)
-                       fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
-               spin_unlock(&parent->lock);
-               object->parent = NULL;
+       cookie = object->cookie;
+       set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
+       if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
+               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
+
+       fscache_done_parent_op(object);
+       return transit_to(KILL_OBJECT);
+}
+
+/*
+ * Wait for completion of all active operations on this object and the death of
+ * all child objects of this object.
+ */
+static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
+                                                      int event)
+{
+       _enter("{OBJ%x,%d,%d},%d",
+              object->debug_id, object->n_ops, object->n_children, event);
+
+       clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+       object->oob_event_mask = 0;
+
+       if (list_empty(&object->dependents) &&
+           object->n_ops == 0 &&
+           object->n_children == 0)
+               return transit_to(DROP_OBJECT);
+
+       if (object->n_in_progress == 0) {
+               spin_lock(&object->lock);
+               if (object->n_ops > 0 && object->n_in_progress == 0)
+                       fscache_start_operations(object);
+               spin_unlock(&object->lock);
        }
 
-       /* this just shifts the object release to the work processor */
-       fscache_put_object(object);
+       if (!list_empty(&object->dependents))
+               return transit_to(KILL_DEPENDENTS);
 
-       _leave("");
+       return transit_to(WAIT_FOR_CLEARANCE);
 }
 
 /*
- * release or recycle an object that the netfs has discarded
+ * Kill dependent objects.
  */
-static void fscache_release_object(struct fscache_object *object)
+static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
+                                                          int event)
 {
-       _enter("");
+       _enter("{OBJ%x},%d", object->debug_id, event);
 
-       fscache_drop_object(object);
+       if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
+               return NO_TRANSIT; /* Not finished */
+       return transit_to(WAIT_FOR_CLEARANCE);
 }
 
 /*
- * withdraw an object from active service
+ * Drop an object's attachments
  */
-static void fscache_withdraw_object(struct fscache_object *object)
+static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
+                                                      int event)
 {
-       struct fscache_cookie *cookie;
-       bool detached;
+       struct fscache_object *parent = object->parent;
+       struct fscache_cookie *cookie = object->cookie;
+       struct fscache_cache *cache = object->cache;
+       bool awaken = false;
 
-       _enter("");
+       _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);
 
-       spin_lock(&object->lock);
-       cookie = object->cookie;
-       if (cookie) {
-               /* need to get the cookie lock before the object lock, starting
-                * from the object pointer */
-               atomic_inc(&cookie->usage);
-               spin_unlock(&object->lock);
+       ASSERT(cookie != NULL);
+       ASSERT(!hlist_unhashed(&object->cookie_link));
 
-               detached = false;
-               spin_lock(&cookie->lock);
-               spin_lock(&object->lock);
+       /* Make sure the cookie no longer points here and that the netfs isn't
+        * waiting for us.
+        */
+       spin_lock(&cookie->lock);
+       hlist_del_init(&object->cookie_link);
+       if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
+               awaken = true;
+       spin_unlock(&cookie->lock);
 
-               if (object->cookie == cookie) {
-                       hlist_del_init(&object->cookie_link);
-                       object->cookie = NULL;
-                       fscache_invalidation_complete(cookie);
-                       detached = true;
-               }
-               spin_unlock(&cookie->lock);
-               fscache_cookie_put(cookie);
-               if (detached)
-                       fscache_cookie_put(cookie);
-       }
+       if (awaken)
+               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
 
+       /* Prevent a race with our last child, which has to signal EV_CLEARED
+        * before dropping our spinlock.
+        */
+       spin_lock(&object->lock);
        spin_unlock(&object->lock);
 
-       fscache_drop_object(object);
-}
+       /* Discard from the cache's collection of objects */
+       spin_lock(&cache->object_list_lock);
+       list_del_init(&object->cache_link);
+       spin_unlock(&cache->object_list_lock);
 
-/*
- * withdraw an object from active service at the behest of the cache
- * - need break the links to a cached object cookie
- * - called under two situations:
- *   (1) recycler decides to reclaim an in-use object
- *   (2) a cache is unmounted
- * - have to take care as the cookie can be being relinquished by the netfs
- *   simultaneously
- * - the object is pinned by the caller holding a refcount on it
- */
-void fscache_withdrawing_object(struct fscache_cache *cache,
-                               struct fscache_object *object)
-{
-       bool enqueue = false;
+       fscache_stat(&fscache_n_cop_drop_object);
+       cache->ops->drop_object(object);
+       fscache_stat_d(&fscache_n_cop_drop_object);
 
-       _enter(",OBJ%x", object->debug_id);
+       /* The parent object wants to know when all its dependents have gone */
+       if (parent) {
+               _debug("release parent OBJ%x {%d}",
+                      parent->debug_id, parent->n_children);
 
-       spin_lock(&object->lock);
-       if (object->state < FSCACHE_OBJECT_WITHDRAWING) {
-               object->state = FSCACHE_OBJECT_WITHDRAWING;
-               enqueue = true;
+               spin_lock(&parent->lock);
+               parent->n_children--;
+               if (parent->n_children == 0)
+                       fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
+               spin_unlock(&parent->lock);
+               object->parent = NULL;
        }
-       spin_unlock(&object->lock);
 
-       if (enqueue)
-               fscache_enqueue_object(object);
+       /* this just shifts the object release to the work processor */
+       fscache_put_object(object);
+       fscache_stat(&fscache_n_object_dead);
 
        _leave("");
+       return transit_to(OBJECT_DEAD);
 }
 
 /*
@@ -771,7 +736,7 @@ static int fscache_get_object(struct fscache_object *object)
 }
 
 /*
- * discard a ref on a work item
+ * Discard a ref on an object
  */
 static void fscache_put_object(struct fscache_object *object)
 {
@@ -780,6 +745,22 @@ static void fscache_put_object(struct fscache_object *object)
        fscache_stat_d(&fscache_n_cop_put_object);
 }
 
+/**
+ * fscache_object_destroy - Note that a cache object is about to be destroyed
+ * @object: The object to be destroyed
+ *
+ * Note the imminent destruction and deallocation of a cache object record.
+ */
+void fscache_object_destroy(struct fscache_object *object)
+{
+       fscache_objlist_remove(object);
+
+       /* We can get rid of the cookie now */
+       fscache_cookie_put(object->cookie);
+       object->cookie = NULL;
+}
+EXPORT_SYMBOL(fscache_object_destroy);
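
A hedged sketch of the point at which a backend would call the destructor above: from its object release path, once its own reference count on the wrapper object drops to zero and just before the memory is freed. The wrapper type and its usage counter are hypothetical.

	static void my_put_object(struct fscache_object *_object)
	{
		struct my_cache_object *obj =
			container_of(_object, struct my_cache_object, fscache);

		if (atomic_dec_and_test(&obj->usage)) {		/* hypothetical refcount */
			/* Remove from the object list and drop the cookie ref */
			fscache_object_destroy(&obj->fscache);
			kfree(obj);
		}
	}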
+
 /*
  * enqueue an object for metadata-type processing
  */
@@ -803,7 +784,7 @@ void fscache_enqueue_object(struct fscache_object *object)
 
 /**
  * fscache_object_sleep_till_congested - Sleep until object wq is congested
- * @timoutp: Scheduler sleep timeout
+ * @timeoutp: Scheduler sleep timeout
  *
  * Allow an object handler to sleep until the object workqueue is congested.
  *
@@ -831,18 +812,21 @@ bool fscache_object_sleep_till_congested(signed long *timeoutp)
 EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
 
 /*
- * enqueue the dependents of an object for metadata-type processing
- * - the caller must hold the object's lock
- * - this may cause an already locked object to wind up being processed again
+ * Enqueue the dependents of an object for metadata-type processing.
+ *
+ * If we don't manage to finish the list before the scheduler wants to run
+ * again then return false immediately.  We return true if the list was
+ * cleared.
  */
-static void fscache_enqueue_dependents(struct fscache_object *object)
+static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
 {
        struct fscache_object *dep;
+       bool ret = true;
 
        _enter("{OBJ%x}", object->debug_id);
 
        if (list_empty(&object->dependents))
-               return;
+               return true;
 
        spin_lock(&object->lock);
 
@@ -851,23 +835,23 @@ static void fscache_enqueue_dependents(struct fscache_object *object)
                                 struct fscache_object, dep_link);
                list_del_init(&dep->dep_link);
 
-
-               /* sort onto appropriate lists */
-               fscache_enqueue_object(dep);
+               fscache_raise_event(dep, event);
                fscache_put_object(dep);
 
-               if (!list_empty(&object->dependents))
-                       cond_resched_lock(&object->lock);
+               if (!list_empty(&object->dependents) && need_resched()) {
+                       ret = false;
+                       break;
+               }
        }
 
        spin_unlock(&object->lock);
+       return ret;
 }
 
 /*
  * remove an object from whatever queue it's waiting on
- * - the caller must hold object->lock
  */
-void fscache_dequeue_object(struct fscache_object *object)
+static void fscache_dequeue_object(struct fscache_object *object)
 {
        _enter("{OBJ%x}", object->debug_id);
 
@@ -886,7 +870,10 @@ void fscache_dequeue_object(struct fscache_object *object)
  * @data: The auxiliary data for the object
  * @datalen: The size of the auxiliary data
  *
- * This function consults the netfs about the coherency state of an object
+ * This function consults the netfs about the coherency state of an object.
+ * The caller must be holding a ref on cookie->n_active (held by
+ * fscache_look_up_object() on behalf of the cache backend during object lookup
+ * and creation).
  */
 enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
                                        const void *data, uint16_t datalen)
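
A hedged sketch of the backend side of this check: auxiliary data read back from the backing store (the auxbuf/auxlen pair is hypothetical) is passed to the netfs via fscache_check_aux() and each of the three possible verdicts is handled.

	switch (fscache_check_aux(object, auxbuf, auxlen)) {
	case FSCACHE_CHECKAUX_OKAY:
		break;					/* cached object is coherent */
	case FSCACHE_CHECKAUX_NEEDS_UPDATE:
		/* ... rewrite the stored auxiliary data ... */
		break;
	case FSCACHE_CHECKAUX_OBSOLETE:
		/* ... retire the stale backing object ... */
		break;
	}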
@@ -927,12 +914,23 @@ EXPORT_SYMBOL(fscache_check_aux);
 /*
  * Asynchronously invalidate an object.
  */
-static void fscache_invalidate_object(struct fscache_object *object)
+static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
+                                                             int event)
 {
        struct fscache_operation *op;
        struct fscache_cookie *cookie = object->cookie;
 
-       _enter("{OBJ%x}", object->debug_id);
+       _enter("{OBJ%x},%d", object->debug_id, event);
+
+       /* We're going to need the cookie.  If the cookie is not available then
+        * retire the object instead.
+        */
+       if (!fscache_use_cookie(object)) {
+               ASSERT(object->cookie->stores.rnode == NULL);
+               set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+               _leave(" [no cookie]");
+               return transit_to(KILL_OBJECT);
+       }
 
        /* Reject any new read/write ops and abort any that are pending. */
        fscache_invalidate_writes(cookie);
@@ -941,14 +939,13 @@ static void fscache_invalidate_object(struct fscache_object *object)
 
        /* Now we have to wait for in-progress reads and writes */
        op = kzalloc(sizeof(*op), GFP_KERNEL);
-       if (!op) {
-               fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
-               _leave(" [ENOMEM]");
-               return;
-       }
+       if (!op)
+               goto nomem;
 
        fscache_operation_init(op, object->cache->ops->invalidate_object, NULL);
-       op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
+       op->flags = FSCACHE_OP_ASYNC |
+               (1 << FSCACHE_OP_EXCLUSIVE) |
+               (1 << FSCACHE_OP_UNUSE_COOKIE);
 
        spin_lock(&cookie->lock);
        if (fscache_submit_exclusive_op(object, op) < 0)
@@ -965,13 +962,50 @@ static void fscache_invalidate_object(struct fscache_object *object)
        /* We can allow read and write requests to come in once again.  They'll
         * queue up behind our exclusive invalidation operation.
         */
-       fscache_invalidation_complete(cookie);
-       _leave("");
-       return;
+       if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
+               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
+       _leave(" [ok]");
+       return transit_to(UPDATE_OBJECT);
+
+nomem:
+       clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+       fscache_unuse_cookie(object);
+       _leave(" [ENOMEM]");
+       return transit_to(KILL_OBJECT);
 
 submit_op_failed:
+       clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
        spin_unlock(&cookie->lock);
        kfree(op);
-       fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
        _leave(" [EIO]");
+       return transit_to(KILL_OBJECT);
+}
+
+static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
+                                                            int event)
+{
+       const struct fscache_state *s;
+
+       fscache_stat(&fscache_n_invalidates_run);
+       fscache_stat(&fscache_n_cop_invalidate_object);
+       s = _fscache_invalidate_object(object, event);
+       fscache_stat_d(&fscache_n_cop_invalidate_object);
+       return s;
+}
+
+/*
+ * Asynchronously update an object.
+ */
+static const struct fscache_state *fscache_update_object(struct fscache_object *object,
+                                                        int event)
+{
+       _enter("{OBJ%x},%d", object->debug_id, event);
+
+       fscache_stat(&fscache_n_updates_run);
+       fscache_stat(&fscache_n_cop_update_object);
+       object->cache->ops->update_object(object);
+       fscache_stat_d(&fscache_n_cop_update_object);
+
+       _leave("");
+       return transit_to(WAIT_FOR_CMD);
 }
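
All of the handlers rewritten in this file follow the same convention: take the object and the triggering event, do a bounded amount of work, then either name the next state with transit_to() or return NO_TRANSIT to stay put until another event arrives. A hedged sketch of that shape; fscache_do_something() and its condition are invented, and WAIT_FOR_CMD is used only as an example target state.

	static const struct fscache_state *fscache_do_something(struct fscache_object *object,
								int event)
	{
		_enter("{OBJ%x},%d", object->debug_id, event);

		if (!my_work_can_proceed(object))	/* hypothetical condition */
			return NO_TRANSIT;		/* wait for a further event */

		return transit_to(WAIT_FOR_CMD);	/* otherwise move on */
	}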
index 762a9ec4ffa4739686e41cd3f8521765ca6e9719..318071aca217e54385044afd7c9d6407c70fde4c 100644 (file)
@@ -35,7 +35,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 
        ASSERT(list_empty(&op->pend_link));
        ASSERT(op->processor != NULL);
-       ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
+       ASSERT(fscache_object_is_available(op->object));
        ASSERTCMP(atomic_read(&op->usage), >, 0);
        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
 
@@ -119,7 +119,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
                /* need to issue a new write op after this */
                clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                ret = 0;
-       } else if (object->state == FSCACHE_OBJECT_CREATING) {
+       } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                op->object = object;
                object->n_ops++;
                object->n_exclusive++;  /* reads and writes must wait */
@@ -144,7 +144,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
  */
 static void fscache_report_unexpected_submission(struct fscache_object *object,
                                                 struct fscache_operation *op,
-                                                unsigned long ostate)
+                                                const struct fscache_state *ostate)
 {
        static bool once_only;
        struct fscache_operation *p;
@@ -155,11 +155,8 @@ static void fscache_report_unexpected_submission(struct fscache_object *object,
        once_only = true;
 
        kdebug("unexpected submission OP%x [OBJ%x %s]",
-              op->debug_id, object->debug_id,
-              fscache_object_states[object->state]);
-       kdebug("objstate=%s [%s]",
-              fscache_object_states[object->state],
-              fscache_object_states[ostate]);
+              op->debug_id, object->debug_id, object->state->name);
+       kdebug("objstate=%s [%s]", object->state->name, ostate->name);
        kdebug("objflags=%lx", object->flags);
        kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
        kdebug("ops=%u inp=%u exc=%u",
@@ -190,7 +187,7 @@ static void fscache_report_unexpected_submission(struct fscache_object *object,
 int fscache_submit_op(struct fscache_object *object,
                      struct fscache_operation *op)
 {
-       unsigned long ostate;
+       const struct fscache_state *ostate;
        int ret;
 
        _enter("{OBJ%x OP%x},{%u}",
@@ -226,16 +223,14 @@ int fscache_submit_op(struct fscache_object *object,
                        fscache_run_op(object, op);
                }
                ret = 0;
-       } else if (object->state == FSCACHE_OBJECT_CREATING) {
+       } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                op->object = object;
                object->n_ops++;
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
-       } else if (object->state == FSCACHE_OBJECT_DYING ||
-                  object->state == FSCACHE_OBJECT_LC_DYING ||
-                  object->state == FSCACHE_OBJECT_WITHDRAWING) {
+       } else if (fscache_object_is_dying(object)) {
                fscache_stat(&fscache_n_op_rejected);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
@@ -265,8 +260,8 @@ void fscache_abort_object(struct fscache_object *object)
 }
 
 /*
- * jump start the operation processing on an object
- * - caller must hold object->lock
+ * Jump start the operation processing on an object.  The caller must hold
+ * object->lock.
  */
 void fscache_start_operations(struct fscache_object *object)
 {
@@ -428,14 +423,10 @@ void fscache_put_operation(struct fscache_operation *op)
 
        object = op->object;
 
-       if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) {
-               if (atomic_dec_and_test(&object->n_reads)) {
-                       clear_bit(FSCACHE_COOKIE_WAITING_ON_READS,
-                                 &object->cookie->flags);
-                       wake_up_bit(&object->cookie->flags,
-                                   FSCACHE_COOKIE_WAITING_ON_READS);
-               }
-       }
+       if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
+               atomic_dec(&object->n_reads);
+       if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
+               fscache_unuse_cookie(object);
 
        /* now... we may get called with the object spinlock held, so we
         * complete the cleanup here only if we can immediately acquire the
index ff000e52072d171258b0bb59e5e0efd560329ce9..d479ab3c63e487ba097ff2b865c34401a9fcbfcb 100644 (file)
@@ -109,7 +109,7 @@ page_busy:
         * allocator as the work threads writing to the cache may all end up
         * sleeping on memory allocation, so we may need to impose a timeout
         * too. */
-       if (!(gfp & __GFP_WAIT)) {
+       if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
                fscache_stat(&fscache_n_store_vmscan_busy);
                return false;
        }
@@ -163,10 +163,12 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
 
        fscache_stat(&fscache_n_attr_changed_calls);
 
-       if (fscache_object_is_active(object)) {
+       if (fscache_object_is_active(object) &&
+           fscache_use_cookie(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
+               fscache_unuse_cookie(object);
                if (ret < 0)
                        fscache_abort_object(object);
        }
@@ -233,7 +235,7 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
 
        _enter("{OP%x}", op->op.debug_id);
 
-       ASSERTCMP(op->n_pages, ==, 0);
+       ASSERTCMP(atomic_read(&op->n_pages), ==, 0);
 
        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
@@ -246,6 +248,7 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
  * allocate a retrieval op
  */
 static struct fscache_retrieval *fscache_alloc_retrieval(
+       struct fscache_cookie *cookie,
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
@@ -260,7 +263,10 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
        }
 
        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
-       op->op.flags    = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
+       atomic_inc(&cookie->n_active);
+       op->op.flags    = FSCACHE_OP_MYTHREAD |
+               (1UL << FSCACHE_OP_WAITING) |
+               (1UL << FSCACHE_OP_UNUSE_COOKIE);
        op->mapping     = mapping;
        op->end_io_func = end_io_func;
        op->context     = context;
@@ -310,7 +316,7 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);
 
-       op->n_pages = 0;
+       atomic_set(&op->n_pages, 0);
 }
 
 /*
@@ -394,12 +400,13 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;
 
-       op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
+       op = fscache_alloc_retrieval(cookie, page->mapping,
+                                    end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
-       op->n_pages = 1;
+       atomic_set(&op->n_pages, 1);
 
        spin_lock(&cookie->lock);
 
@@ -408,7 +415,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
-       ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
+       ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
 
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
@@ -465,6 +472,7 @@ nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
+       atomic_dec(&cookie->n_active);
        kfree(op);
 nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
@@ -522,10 +530,10 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;
 
-       op = fscache_alloc_retrieval(mapping, end_io_func, context);
+       op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
-       op->n_pages = *nr_pages;
+       atomic_set(&op->n_pages, *nr_pages);
 
        spin_lock(&cookie->lock);
 
@@ -589,6 +597,7 @@ nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
+       atomic_dec(&cookie->n_active);
        kfree(op);
 nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
@@ -631,10 +640,10 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;
 
-       op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
+       op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
-       op->n_pages = 1;
+       atomic_set(&op->n_pages, 1);
 
        spin_lock(&cookie->lock);
 
@@ -675,6 +684,7 @@ error:
 
 nobufs_unlock:
        spin_unlock(&cookie->lock);
+       atomic_dec(&cookie->n_active);
        kfree(op);
 nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
@@ -729,8 +739,9 @@ static void fscache_write_op(struct fscache_operation *_op)
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
-               _leave(" [cancel] op{f=%lx s=%u} obj{s=%u f=%lx}",
-                      _op->flags, _op->state, object->state, object->flags);
+               _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
+                      _op->flags, _op->state, object->state->short_name,
+                      object->flags);
                return;
        }
 
@@ -796,11 +807,16 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
 
        _enter("");
 
-       while (spin_lock(&cookie->stores_lock),
-              n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
-                                             ARRAY_SIZE(results),
-                                             FSCACHE_COOKIE_PENDING_TAG),
-              n > 0) {
+       for (;;) {
+               spin_lock(&cookie->stores_lock);
+               n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
+                                              ARRAY_SIZE(results),
+                                              FSCACHE_COOKIE_PENDING_TAG);
+               if (n == 0) {
+                       spin_unlock(&cookie->stores_lock);
+                       break;
+               }
+
                for (i = n - 1; i >= 0; i--) {
                        page = results[i];
                        radix_tree_delete(&cookie->stores, page->index);
@@ -812,7 +828,6 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
                        page_cache_release(results[i]);
        }
 
-       spin_unlock(&cookie->stores_lock);
        _leave("");
 }
 
@@ -829,14 +844,12 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
  *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
  *      set)
  *
- *     (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
- *         fill op)
+ *     (a) no writes yet
  *
  *     (b) writes deferred till post-creation (mark page for writing and
  *         return immediately)
  *
  *  (2) negative lookup, object created, initial fill being made from netfs
- *      (FSCACHE_COOKIE_INITIAL_FILL is set)
  *
  *     (a) fill point not yet reached this page (mark page for writing and
  *          return)
@@ -873,7 +886,9 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 
        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
-       op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
+       op->op.flags = FSCACHE_OP_ASYNC |
+               (1 << FSCACHE_OP_WAITING) |
+               (1 << FSCACHE_OP_UNUSE_COOKIE);
 
        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
@@ -919,6 +934,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;
 
+       atomic_inc(&cookie->n_active);
        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;
 
@@ -945,6 +961,7 @@ already_pending:
        return 0;
 
 submit_failed:
+       atomic_dec(&cookie->n_active);
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
index f3f783dc4f7509096235c7f8f6ed468fe51ddded..0eda52738ec4d0fc6cac460fab9d4baddde4ff4b 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/namei.h>
 #include <linux/slab.h>
 
-static bool fuse_use_readdirplus(struct inode *dir, struct file *filp)
+static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx)
 {
        struct fuse_conn *fc = get_fuse_conn(dir);
        struct fuse_inode *fi = get_fuse_inode(dir);
@@ -25,7 +25,7 @@ static bool fuse_use_readdirplus(struct inode *dir, struct file *filp)
                return true;
        if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state))
                return true;
-       if (filp->f_pos == 0)
+       if (ctx->pos == 0)
                return true;
        return false;
 }
@@ -1165,25 +1165,23 @@ static int fuse_permission(struct inode *inode, int mask)
 }
 
 static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
-                        void *dstbuf, filldir_t filldir)
+                        struct dir_context *ctx)
 {
        while (nbytes >= FUSE_NAME_OFFSET) {
                struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
                size_t reclen = FUSE_DIRENT_SIZE(dirent);
-               int over;
                if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
                        return -EIO;
                if (reclen > nbytes)
                        break;
 
-               over = filldir(dstbuf, dirent->name, dirent->namelen,
-                              file->f_pos, dirent->ino, dirent->type);
-               if (over)
+               if (!dir_emit(ctx, dirent->name, dirent->namelen,
+                              dirent->ino, dirent->type))
                        break;
 
                buf += reclen;
                nbytes -= reclen;
-               file->f_pos = dirent->off;
+               ctx->pos = dirent->off;
        }
 
        return 0;
@@ -1284,7 +1282,7 @@ out:
 }
 
 static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
-                            void *dstbuf, filldir_t filldir, u64 attr_version)
+                            struct dir_context *ctx, u64 attr_version)
 {
        struct fuse_direntplus *direntplus;
        struct fuse_dirent *dirent;
@@ -1309,10 +1307,9 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
                           we need to send a FORGET for each of those
                           which we did not link.
                        */
-                       over = filldir(dstbuf, dirent->name, dirent->namelen,
-                                      file->f_pos, dirent->ino,
-                                      dirent->type);
-                       file->f_pos = dirent->off;
+                       over = !dir_emit(ctx, dirent->name, dirent->namelen,
+                                      dirent->ino, dirent->type);
+                       ctx->pos = dirent->off;
                }
 
                buf += reclen;
@@ -1326,7 +1323,7 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
        return 0;
 }
 
-static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
+static int fuse_readdir(struct file *file, struct dir_context *ctx)
 {
        int plus, err;
        size_t nbytes;
@@ -1349,17 +1346,17 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
                return -ENOMEM;
        }
 
-       plus = fuse_use_readdirplus(inode, file);
+       plus = fuse_use_readdirplus(inode, ctx);
        req->out.argpages = 1;
        req->num_pages = 1;
        req->pages[0] = page;
        req->page_descs[0].length = PAGE_SIZE;
        if (plus) {
                attr_version = fuse_get_attr_version(fc);
-               fuse_read_fill(req, file, file->f_pos, PAGE_SIZE,
+               fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
                               FUSE_READDIRPLUS);
        } else {
-               fuse_read_fill(req, file, file->f_pos, PAGE_SIZE,
+               fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
                               FUSE_READDIR);
        }
        fuse_request_send(fc, req);
@@ -1369,11 +1366,11 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
        if (!err) {
                if (plus) {
                        err = parse_dirplusfile(page_address(page), nbytes,
-                                               file, dstbuf, filldir,
+                                               file, ctx,
                                                attr_version);
                } else {
                        err = parse_dirfile(page_address(page), nbytes, file,
-                                           dstbuf, filldir);
+                                           ctx);
                }
        }
 
@@ -1886,7 +1883,7 @@ static const struct inode_operations fuse_dir_inode_operations = {
 static const struct file_operations fuse_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = fuse_readdir,
+       .iterate        = fuse_readdir,
        .open           = fuse_dir_open,
        .release        = fuse_dir_release,
        .fsync          = fuse_dir_fsync,
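
The readdir conversions in this patch (fuse here, gfs2 below) all follow the same new VFS pattern: the ->iterate() method receives a struct dir_context, emits entries through dir_emit() and advances ctx->pos itself, stopping when dir_emit() reports that the caller's buffer is full. A minimal hedged sketch with invented myfs_* names; only dir_emit(), dir_context and the .iterate hookup come from the real API.

	static int myfs_readdir(struct file *file, struct dir_context *ctx)
	{
		struct myfs_entry *e;		/* hypothetical in-memory entry */

		for (e = myfs_first(file, ctx->pos); e; e = myfs_next(e)) {
			if (!dir_emit(ctx, e->name, e->namelen, e->ino, e->type))
				return 0;	/* buffer full; resume from ctx->pos later */
			ctx->pos = e->next_off;	/* position to resume from on the next call */
		}
		return 0;
	}

	/* hooked up via .iterate in the directory's file_operations, as above */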
index e570081f9f76be0be1bc05deb9b260101d018b51..35f2810331427b9c4b660cc6b83ec1b889d6910b 100644 (file)
@@ -2470,13 +2470,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
                .mode = mode
        };
        int err;
+       bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
+                          (mode & FALLOC_FL_PUNCH_HOLE);
 
        if (fc->no_fallocate)
                return -EOPNOTSUPP;
 
-       if (mode & FALLOC_FL_PUNCH_HOLE) {
+       if (lock_inode) {
                mutex_lock(&inode->i_mutex);
-               fuse_set_nowrite(inode);
+               if (mode & FALLOC_FL_PUNCH_HOLE)
+                       fuse_set_nowrite(inode);
        }
 
        req = fuse_get_req_nopages(fc);
@@ -2511,8 +2514,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        fuse_invalidate_attr(inode);
 
 out:
-       if (mode & FALLOC_FL_PUNCH_HOLE) {
-               fuse_release_nowrite(inode);
+       if (lock_inode) {
+               if (mode & FALLOC_FL_PUNCH_HOLE)
+                       fuse_release_nowrite(inode);
                mutex_unlock(&inode->i_mutex);
        }
 
index 0bad69ed6336e2e1450862f90a886669c16057b3..ee48ad37d9c0109dfd81bcca983c13a29492f0cb 100644 (file)
@@ -110,7 +110,7 @@ static int gfs2_writepage_common(struct page *page,
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
-               page->mapping->a_ops->invalidatepage(page, 0);
+               page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
                goto out;
        }
        return 1;
@@ -299,7 +299,8 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
 
                /* Is the page fully outside i_size? (truncate in progress) */
                if (page->index > end_index || (page->index == end_index && !offset)) {
-                       page->mapping->a_ops->invalidatepage(page, 0);
+                       page->mapping->a_ops->invalidatepage(page, 0,
+                                                            PAGE_CACHE_SIZE);
                        unlock_page(page);
                        continue;
                }
@@ -943,27 +944,33 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
        unlock_buffer(bh);
 }
 
-static void gfs2_invalidatepage(struct page *page, unsigned long offset)
+static void gfs2_invalidatepage(struct page *page, unsigned int offset,
+                               unsigned int length)
 {
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
+       unsigned int stop = offset + length;
+       int partial_page = (offset || length < PAGE_CACHE_SIZE);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;
 
        BUG_ON(!PageLocked(page));
-       if (offset == 0)
+       if (!partial_page)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;
 
        bh = head = page_buffers(page);
        do {
+               if (pos + bh->b_size > stop)
+                       return;
+
                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
 out:
-       if (offset == 0)
+       if (!partial_page)
                try_to_release_page(page, 0);
 }
 
index 93b5809c20bb347a9de3e5f0d46949c3dcf78dc5..5e2f56fccf6b3dfd516c04bd01649852dd1e855d 100644 (file)
@@ -1232,7 +1232,9 @@ static int do_grow(struct inode *inode, u64 size)
                unstuff = 1;
        }
 
-       error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
+       error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
+                                (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
+                                 0 : RES_QUOTA), 0);
        if (error)
                goto do_grow_release;
 
index b631c904346084b5fb7b7d96985e724e7b4173a3..0cb4c1557f20c87a44f74b4ac7d29f9a700de05a 100644 (file)
@@ -1125,13 +1125,14 @@ static int dir_double_exhash(struct gfs2_inode *dip)
        if (IS_ERR(hc))
                return PTR_ERR(hc);
 
-       h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
+       hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
        if (hc2 == NULL)
                hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);
 
        if (!hc2)
                return -ENOMEM;
 
+       h = hc2;
        error = gfs2_meta_inode_buffer(dip, &dibh);
        if (error)
                goto out_kfree;
@@ -1212,9 +1213,7 @@ static int compare_dents(const void *a, const void *b)
 /**
  * do_filldir_main - read out directory entries
  * @dip: The GFS2 inode
- * @offset: The offset in the file to read from
- * @opaque: opaque data to pass to filldir
- * @filldir: The function to pass entries to
+ * @ctx: what to feed the entries to
  * @darr: an array of struct gfs2_dirent pointers to read
  * @entries: the number of entries in darr
  * @copied: pointer to int that's non-zero if a entry has been copied out
@@ -1224,11 +1223,10 @@ static int compare_dents(const void *a, const void *b)
  * the possibility that they will fall into different readdir buffers or
  * that someone will want to seek to that location.
  *
- * Returns: errno, >0 on exception from filldir
+ * Returns: errno, >0 if the actor tells you to stop
  */
 
-static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
-                          void *opaque, filldir_t filldir,
+static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx,
                           const struct gfs2_dirent **darr, u32 entries,
                           int *copied)
 {
@@ -1236,7 +1234,6 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
        u64 off, off_next;
        unsigned int x, y;
        int run = 0;
-       int error = 0;
 
        sort(darr, entries, sizeof(struct gfs2_dirent *), compare_dents, NULL);
 
@@ -1253,9 +1250,9 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
                        off_next = be32_to_cpu(dent_next->de_hash);
                        off_next = gfs2_disk_hash2offset(off_next);
 
-                       if (off < *offset)
+                       if (off < ctx->pos)
                                continue;
-                       *offset = off;
+                       ctx->pos = off;
 
                        if (off_next == off) {
                                if (*copied && !run)
@@ -1264,26 +1261,25 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
                        } else
                                run = 0;
                } else {
-                       if (off < *offset)
+                       if (off < ctx->pos)
                                continue;
-                       *offset = off;
+                       ctx->pos = off;
                }
 
-               error = filldir(opaque, (const char *)(dent + 1),
+               if (!dir_emit(ctx, (const char *)(dent + 1),
                                be16_to_cpu(dent->de_name_len),
-                               off, be64_to_cpu(dent->de_inum.no_addr),
-                               be16_to_cpu(dent->de_type));
-               if (error)
+                               be64_to_cpu(dent->de_inum.no_addr),
+                               be16_to_cpu(dent->de_type)))
                        return 1;
 
                *copied = 1;
        }
 
-       /* Increment the *offset by one, so the next time we come into the
+       /* Increment the ctx->pos by one, so the next time we come into the
           do_filldir fxn, we get the next entry instead of the last one in the
           current leaf */
 
-       (*offset)++;
+       ctx->pos++;
 
        return 0;
 }
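
For reference, the hunk above replaces the filldir callback plus u64 *offset bookkeeping with a struct dir_context: dir_emit() returns false once the caller's buffer is full, and the read position lives in ctx->pos. A minimal sketch of an iterate callback written against that interface; the function name, entry name and inode number below are illustrative, not GFS2 code.

static int example_iterate(struct file *file, struct dir_context *ctx)
{
        /* ctx->pos replaces the old u64 *offset argument */
        if (ctx->pos == 0) {
                if (!dir_emit(ctx, "example", 7, 42 /* ino */, DT_REG))
                        return 0;       /* buffer full; we will be called again */
                ctx->pos = 1;
        }
        return 0;
}
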
@@ -1307,8 +1303,8 @@ static void gfs2_free_sort_buffer(void *ptr)
                kfree(ptr);
 }
 
-static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
-                             filldir_t filldir, int *copied, unsigned *depth,
+static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
+                             int *copied, unsigned *depth,
                              u64 leaf_no)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
@@ -1386,8 +1382,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
        } while(lfn);
 
        BUG_ON(entries2 != entries);
-       error = do_filldir_main(ip, offset, opaque, filldir, darr,
-                               entries, copied);
+       error = do_filldir_main(ip, ctx, darr, entries, copied);
 out_free:
        for(i = 0; i < leaf; i++)
                brelse(larr[i]);
@@ -1446,15 +1441,13 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
 /**
  * dir_e_read - Reads the entries from a directory into a filldir buffer
  * @dip: dinode pointer
- * @offset: the hash of the last entry read shifted to the right once
- * @opaque: buffer for the filldir function to fill
- * @filldir: points to the filldir function to use
+ * @ctx: actor to feed the entries to
  *
  * Returns: errno
  */
 
-static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
-                     filldir_t filldir, struct file_ra_state *f_ra)
+static int dir_e_read(struct inode *inode, struct dir_context *ctx,
+                     struct file_ra_state *f_ra)
 {
        struct gfs2_inode *dip = GFS2_I(inode);
        u32 hsize, len = 0;
@@ -1465,7 +1458,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
        unsigned depth = 0;
 
        hsize = 1 << dip->i_depth;
-       hash = gfs2_dir_offset2hash(*offset);
+       hash = gfs2_dir_offset2hash(ctx->pos);
        index = hash >> (32 - dip->i_depth);
 
        if (dip->i_hash_cache == NULL)
@@ -1477,7 +1470,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
        gfs2_dir_readahead(inode, hsize, index, f_ra);
 
        while (index < hsize) {
-               error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
+               error = gfs2_dir_read_leaf(inode, ctx,
                                           &copied, &depth,
                                           be64_to_cpu(lp[index]));
                if (error)
@@ -1492,8 +1485,8 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
        return error;
 }
 
-int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
-                 filldir_t filldir, struct file_ra_state *f_ra)
+int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
+                 struct file_ra_state *f_ra)
 {
        struct gfs2_inode *dip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1507,7 +1500,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
                return 0;
 
        if (dip->i_diskflags & GFS2_DIF_EXHASH)
-               return dir_e_read(inode, offset, opaque, filldir, f_ra);
+               return dir_e_read(inode, ctx, f_ra);
 
        if (!gfs2_is_stuffed(dip)) {
                gfs2_consist_inode(dip);
@@ -1539,7 +1532,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
                        error = -EIO;
                        goto out;
                }
-               error = do_filldir_main(dip, offset, opaque, filldir, darr,
+               error = do_filldir_main(dip, ctx, darr,
                                        dip->i_entries, &copied);
 out:
                kfree(darr);
@@ -1555,9 +1548,9 @@ out:
 
 /**
  * gfs2_dir_search - Search a directory
- * @dip: The GFS2 inode
- * @filename:
- * @inode:
+ * @dip: The GFS2 dir inode
+ * @name: The name we are looking up
+ * @fail_on_exist: Fail if the name exists rather than looking it up
  *
  * This routine searches a directory for a file or another directory.
  * Assumes a glock is held on dip.
@@ -1565,22 +1558,25 @@ out:
  * Returns: errno
  */
 
-struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
+struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name,
+                             bool fail_on_exist)
 {
        struct buffer_head *bh;
        struct gfs2_dirent *dent;
-       struct inode *inode;
+       u64 addr, formal_ino;
+       u16 dtype;
 
        dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
        if (dent) {
                if (IS_ERR(dent))
                        return ERR_CAST(dent);
-               inode = gfs2_inode_lookup(dir->i_sb, 
-                               be16_to_cpu(dent->de_type),
-                               be64_to_cpu(dent->de_inum.no_addr),
-                               be64_to_cpu(dent->de_inum.no_formal_ino), 0);
+               dtype = be16_to_cpu(dent->de_type);
+               addr = be64_to_cpu(dent->de_inum.no_addr);
+               formal_ino = be64_to_cpu(dent->de_inum.no_formal_ino);
                brelse(bh);
-               return inode;
+               if (fail_on_exist)
+                       return ERR_PTR(-EEXIST);
+               return gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino, 0);
        }
        return ERR_PTR(-ENOENT);
 }
index 98c960beab35e479460771d19a17e24259a4fe1a..4f03bbd1873f4173faa9d20d2be09b9b1a0f4413 100644 (file)
@@ -18,14 +18,15 @@ struct gfs2_inode;
 struct gfs2_inum;
 
 extern struct inode *gfs2_dir_search(struct inode *dir,
-                                    const struct qstr *filename);
+                                    const struct qstr *filename,
+                                    bool fail_on_exist);
 extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
                          const struct gfs2_inode *ip);
 extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
                        const struct gfs2_inode *ip);
 extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
-extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
-                        filldir_t filldir, struct file_ra_state *f_ra);
+extern int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
+                        struct file_ra_state *f_ra);
 extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
                          const struct gfs2_inode *nip, unsigned int new_type);
 
index 9973df4ff565b6642247e44f854307a0d696f06a..8b9b3775e2e78ca24b56db57d5ec436efc5f6df3 100644 (file)
@@ -64,6 +64,7 @@ static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
 }
 
 struct get_name_filldir {
+       struct dir_context ctx;
        struct gfs2_inum_host inum;
        char *name;
 };
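
Putting the struct dir_context first in the wrapper lets the actor, which receives the context as its opaque argument, recover the wrapper with a plain cast (container_of() would work equally well). Below is a sketch approximating the unchanged get_name_filldir() actor under the new layout; the body is a paraphrase, not the exact GFS2 code.

static int example_get_name_actor(void *opaque, const char *name, int len,
                                  loff_t pos, u64 ino, unsigned int type)
{
        /* opaque really points at gnfd.ctx; ctx is member 0, so this cast is safe */
        struct get_name_filldir *gnfd = opaque;

        if (ino != gnfd->inum.no_addr)
                return 0;               /* not the child we want, keep going */
        memcpy(gnfd->name, name, len);
        gnfd->name[len] = '\0';
        return 1;                       /* non-zero stops the directory walk */
}
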
@@ -88,9 +89,11 @@ static int gfs2_get_name(struct dentry *parent, char *name,
        struct inode *dir = parent->d_inode;
        struct inode *inode = child->d_inode;
        struct gfs2_inode *dip, *ip;
-       struct get_name_filldir gnfd;
+       struct get_name_filldir gnfd = {
+               .ctx.actor = get_name_filldir,
+               .name = name
+       };
        struct gfs2_holder gh;
-       u64 offset = 0;
        int error;
        struct file_ra_state f_ra = { .start = 0 };
 
@@ -106,13 +109,12 @@ static int gfs2_get_name(struct dentry *parent, char *name,
        *name = 0;
        gnfd.inum.no_addr = ip->i_no_addr;
        gnfd.inum.no_formal_ino = ip->i_no_formal_ino;
-       gnfd.name = name;
 
        error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);
        if (error)
                return error;
 
-       error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir, &f_ra);
+       error = gfs2_dir_read(dir, &gnfd.ctx, &f_ra);
 
        gfs2_glock_dq_uninit(&gh);
 
index ad0dc38d87ab74dd7695a74b683a2baf7d3620d3..f99f9e8a325fa1cd37abcae7f2a09cb21ce6dd14 100644 (file)
@@ -82,35 +82,28 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
 }
 
 /**
- * gfs2_readdir - Read directory entries from a directory
+ * gfs2_readdir - Iterator for a directory
  * @file: The directory to read from
- * @dirent: Buffer for dirents
- * @filldir: Function used to do the copying
+ * @ctx: What to feed directory entries to
  *
  * Returns: errno
  */
 
-static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
+static int gfs2_readdir(struct file *file, struct dir_context *ctx)
 {
        struct inode *dir = file->f_mapping->host;
        struct gfs2_inode *dip = GFS2_I(dir);
        struct gfs2_holder d_gh;
-       u64 offset = file->f_pos;
        int error;
 
-       gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
-       error = gfs2_glock_nq(&d_gh);
-       if (error) {
-               gfs2_holder_uninit(&d_gh);
+       error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
+       if (error)
                return error;
-       }
 
-       error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);
+       error = gfs2_dir_read(dir, ctx, &file->f_ra);
 
        gfs2_glock_dq_uninit(&d_gh);
 
-       file->f_pos = offset;
-
        return error;
 }
 
@@ -538,21 +531,30 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
 }
 
 /**
- * gfs2_open - open a file
- * @inode: the inode to open
- * @file: the struct file for this opening
+ * gfs2_open_common - This is common to open and atomic_open
+ * @inode: The inode being opened
+ * @file: The file being opened
  *
- * Returns: errno
+ * This may be called under a glock or not, depending upon how it has
+ * been called. We must always be called under a glock for regular
+ * files, however. For other file types, it does not matter whether
+ * we hold the glock or not.
+ *
+ * Returns: Error code or 0 for success
  */
 
-static int gfs2_open(struct inode *inode, struct file *file)
+int gfs2_open_common(struct inode *inode, struct file *file)
 {
-       struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_holder i_gh;
        struct gfs2_file *fp;
-       int error;
+       int ret;
+
+       if (S_ISREG(inode->i_mode)) {
+               ret = generic_file_open(inode, file);
+               if (ret)
+                       return ret;
+       }
 
-       fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
+       fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
        if (!fp)
                return -ENOMEM;
 
@@ -560,29 +562,43 @@ static int gfs2_open(struct inode *inode, struct file *file)
 
        gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
        file->private_data = fp;
+       return 0;
+}
+
+/**
+ * gfs2_open - open a file
+ * @inode: the inode to open
+ * @file: the struct file for this opening
+ *
+ * After atomic_open, this function is only used for opening files
+ * which are already cached. We must still get the glock for regular
+ * files to ensure that we have the file size uptodate for the large
+ * file check which is in the common code. That is only an issue for
+ * regular files though.
+ *
+ * Returns: errno
+ */
+
+static int gfs2_open(struct inode *inode, struct file *file)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_holder i_gh;
+       int error;
+       bool need_unlock = false;
 
        if (S_ISREG(ip->i_inode.i_mode)) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (error)
-                       goto fail;
+                       return error;
+               need_unlock = true;
+       }
 
-               if (!(file->f_flags & O_LARGEFILE) &&
-                   i_size_read(inode) > MAX_NON_LFS) {
-                       error = -EOVERFLOW;
-                       goto fail_gunlock;
-               }
+       error = gfs2_open_common(inode, file);
 
+       if (need_unlock)
                gfs2_glock_dq_uninit(&i_gh);
-       }
 
-       return 0;
-
-fail_gunlock:
-       gfs2_glock_dq_uninit(&i_gh);
-fail:
-       file->private_data = NULL;
-       kfree(fp);
        return error;
 }
 
@@ -1048,7 +1064,7 @@ const struct file_operations gfs2_file_fops = {
 };
 
 const struct file_operations gfs2_dir_fops = {
-       .readdir        = gfs2_readdir,
+       .iterate        = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_release,
@@ -1078,7 +1094,7 @@ const struct file_operations gfs2_file_fops_nolock = {
 };
 
 const struct file_operations gfs2_dir_fops_nolock = {
-       .readdir        = gfs2_readdir,
+       .iterate        = gfs2_readdir,
        .unlocked_ioctl = gfs2_ioctl,
        .open           = gfs2_open,
        .release        = gfs2_release,
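
With .iterate in place of .readdir, iterate_dir() in the VFS copies file->f_pos into ctx->pos before calling the filesystem and writes it back afterwards, which is why the explicit file->f_pos update in gfs2_readdir() above goes away. The resulting shape of a directory fops table, sketched with illustrative names:

static const struct file_operations example_dir_fops = {
        .read           = generic_read_dir,
        .iterate        = example_iterate,      /* an iterate callback as sketched earlier */
        .llseek         = generic_file_llseek,
};
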
index c66e99c9757143a89ada8058b6f0c10a59d0d580..5f2e5224c51c9ae79e34a1eb0f405a7b304cd0be 100644 (file)
@@ -54,7 +54,6 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
        struct gfs2_bufdata *bd, *tmp;
        struct buffer_head *bh;
        const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
-       sector_t blocknr;
 
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
@@ -65,13 +64,6 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
                                continue;
                        gfs2_ail_error(gl, bh);
                }
-               blocknr = bh->b_blocknr;
-               bh->b_private = NULL;
-               gfs2_remove_from_ail(bd); /* drops ref on bh */
-
-               bd->bd_bh = NULL;
-               bd->bd_blkno = blocknr;
-
                gfs2_trans_add_revoke(sdp, bd);
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
index 62b484e4a9e4e46ac905e9a24e8afdce330225d2..bbb2715171cd0c983770cf86b4b57ed69e04c98d 100644 (file)
@@ -313,7 +313,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
                        goto out;
        }
 
-       inode = gfs2_dir_search(dir, name);
+       inode = gfs2_dir_search(dir, name, false);
        if (IS_ERR(inode))
                error = PTR_ERR(inode);
 out:
@@ -346,17 +346,6 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
        if (!dip->i_inode.i_nlink)
                return -ENOENT;
 
-       error = gfs2_dir_check(&dip->i_inode, name, NULL);
-       switch (error) {
-       case -ENOENT:
-               error = 0;
-               break;
-       case 0:
-               return -EEXIST;
-       default:
-               return error;
-       }
-
        if (dip->i_entries == (u32)-1)
                return -EFBIG;
        if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
@@ -546,6 +535,7 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
  * gfs2_create_inode - Create a new inode
  * @dir: The parent directory
  * @dentry: The new dentry
+ * @file: If non-NULL, the file which is being opened
  * @mode: The permissions on the new inode
  * @dev: For device nodes, this is the device number
  * @symname: For symlinks, this is the link destination
@@ -555,8 +545,9 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
  */
 
 static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+                            struct file *file,
                             umode_t mode, dev_t dev, const char *symname,
-                            unsigned int size, int excl)
+                            unsigned int size, int excl, int *opened)
 {
        const struct qstr *name = &dentry->d_name;
        struct gfs2_holder ghs[2];
@@ -564,6 +555,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        struct gfs2_inode *dip = GFS2_I(dir), *ip;
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct gfs2_glock *io_gl;
+       struct dentry *d;
        int error;
        u32 aflags = 0;
        int arq;
@@ -584,15 +576,30 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
                goto fail;
 
        error = create_ok(dip, name, mode);
-       if ((error == -EEXIST) && S_ISREG(mode) && !excl) {
-               inode = gfs2_lookupi(dir, &dentry->d_name, 0);
-               gfs2_glock_dq_uninit(ghs);
-               d_instantiate(dentry, inode);
-               return IS_ERR(inode) ? PTR_ERR(inode) : 0;
-       }
        if (error)
                goto fail_gunlock;
 
+       inode = gfs2_dir_search(dir, &dentry->d_name, !S_ISREG(mode) || excl);
+       error = PTR_ERR(inode);
+       if (!IS_ERR(inode)) {
+               d = d_splice_alias(inode, dentry);
+               error = 0;
+               if (file && !IS_ERR(d)) {
+                       if (d == NULL)
+                               d = dentry;
+                       if (S_ISREG(inode->i_mode))
+                               error = finish_open(file, d, gfs2_open_common, opened);
+                       else
+                               error = finish_no_open(file, d);
+               }
+               gfs2_glock_dq_uninit(ghs);
+               if (IS_ERR(d))
+                       return PTR_RET(d);
+               return error;
+       } else if (error != -ENOENT) {
+               goto fail_gunlock;
+       }
+
        arq = error = gfs2_diradd_alloc_required(dir, name);
        if (error < 0)
                goto fail_gunlock;
@@ -686,10 +693,12 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
                goto fail_gunlock3;
 
        mark_inode_dirty(inode);
+       d_instantiate(dentry, inode);
+       if (file)
+               error = finish_open(file, dentry, gfs2_open_common, opened);
        gfs2_glock_dq_uninit(ghs);
        gfs2_glock_dq_uninit(ghs + 1);
-       d_instantiate(dentry, inode);
-       return 0;
+       return error;
 
 fail_gunlock3:
        gfs2_glock_dq_uninit(ghs + 1);
@@ -729,36 +738,56 @@ fail:
 static int gfs2_create(struct inode *dir, struct dentry *dentry,
                       umode_t mode, bool excl)
 {
-       return gfs2_create_inode(dir, dentry, S_IFREG | mode, 0, NULL, 0, excl);
+       return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl, NULL);
 }
 
 /**
- * gfs2_lookup - Look up a filename in a directory and return its inode
+ * __gfs2_lookup - Look up a filename in a directory and return its inode
  * @dir: The directory inode
  * @dentry: The dentry of the new inode
- * @nd: passed from Linux VFS, ignored by us
+ * @file: File to be opened
+ * @opened: atomic_open flags
  *
- * Called by the VFS layer. Lock dir and call gfs2_lookupi()
  *
  * Returns: errno
  */
 
-static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
-                                 unsigned int flags)
+static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
+                                   struct file *file, int *opened)
 {
-       struct inode *inode = gfs2_lookupi(dir, &dentry->d_name, 0);
-       if (inode && !IS_ERR(inode)) {
-               struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
-               struct gfs2_holder gh;
-               int error;
-               error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
-               if (error) {
-                       iput(inode);
-                       return ERR_PTR(error);
-               }
-               gfs2_glock_dq_uninit(&gh);
+       struct inode *inode;
+       struct dentry *d;
+       struct gfs2_holder gh;
+       struct gfs2_glock *gl;
+       int error;
+
+       inode = gfs2_lookupi(dir, &dentry->d_name, 0);
+       if (!inode)
+               return NULL;
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
+
+       gl = GFS2_I(inode)->i_gl;
+       error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
+       if (error) {
+               iput(inode);
+               return ERR_PTR(error);
        }
-       return d_splice_alias(inode, dentry);
+
+       d = d_splice_alias(inode, dentry);
+       if (file && S_ISREG(inode->i_mode))
+               error = finish_open(file, dentry, gfs2_open_common, opened);
+
+       gfs2_glock_dq_uninit(&gh);
+       if (error)
+               return ERR_PTR(error);
+       return d;
+}
+
+static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
+                                 unsigned flags)
+{
+       return __gfs2_lookup(dir, dentry, NULL, NULL);
 }
 
 /**
@@ -1076,7 +1105,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
        if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1)
                return -ENAMETOOLONG;
 
-       return gfs2_create_inode(dir, dentry, S_IFLNK | S_IRWXUGO, 0, symname, size, 0);
+       return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL);
 }
 
 /**
@@ -1092,7 +1121,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
        struct gfs2_sbd *sdp = GFS2_SB(dir);
        unsigned dsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
-       return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, dsize, 0);
+       return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL);
 }
 
 /**
@@ -1107,7 +1136,43 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
                      dev_t dev)
 {
-       return gfs2_create_inode(dir, dentry, mode, dev, NULL, 0, 0);
+       return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0, NULL);
+}
+
+/**
+ * gfs2_atomic_open - Atomically open a file
+ * @dir: The directory
+ * @dentry: The proposed new entry
+ * @file: The proposed new struct file
+ * @flags: open flags
+ * @mode: File mode
+ * @opened: Flag to say whether the file has been opened or not
+ *
+ * Returns: error code or 0 for success
+ */
+
+static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
+                            struct file *file, unsigned flags,
+                            umode_t mode, int *opened)
+{
+       struct dentry *d;
+       bool excl = !!(flags & O_EXCL);
+
+       d = __gfs2_lookup(dir, dentry, file, opened);
+       if (IS_ERR(d))
+               return PTR_ERR(d);
+       if (d == NULL)
+               d = dentry;
+       if (d->d_inode) {
+               if (!(*opened & FILE_OPENED))
+                       return finish_no_open(file, d);
+               return 0;
+       }
+
+       if (!(flags & O_CREAT))
+               return -ENOENT;
+
+       return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl, opened);
 }
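
gfs2_atomic_open() follows the ->atomic_open contract: a name that already exists but was not opened during lookup is handed back with finish_no_open() so the VFS completes the open, otherwise the inode is created and opened in one step through finish_open(). A stripped-down sketch of that contract; example_open and the elided creation step are placeholders rather than GFS2 code.

static int example_open(struct inode *inode, struct file *file)
{
        return 0;                       /* per-open setup would go here */
}

static int example_atomic_open(struct inode *dir, struct dentry *dentry,
                               struct file *file, unsigned flags,
                               umode_t mode, int *opened)
{
        if (dentry->d_inode)            /* name already exists */
                return finish_no_open(file, dentry);
        if (!(flags & O_CREAT))
                return -ENOENT;
        /* ... allocate and d_instantiate() the new inode here ... */
        return finish_open(file, dentry, example_open, opened);
}
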
 
 /*
@@ -1787,6 +1852,7 @@ const struct inode_operations gfs2_dir_iops = {
        .removexattr = gfs2_removexattr,
        .fiemap = gfs2_fiemap,
        .get_acl = gfs2_get_acl,
+       .atomic_open = gfs2_atomic_open,
 };
 
 const struct inode_operations gfs2_symlink_iops = {
index c53c7477f6daba17c1ed0902672c0b8cc624b2de..ba4d9492d422b4801aa96bfb6c5969ac30e6f0b9 100644 (file)
@@ -109,6 +109,7 @@ extern int gfs2_permission(struct inode *inode, int mask);
 extern int gfs2_setattr_simple(struct inode *inode, struct iattr *attr);
 extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
 extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
+extern int gfs2_open_common(struct inode *inode, struct file *file);
 
 extern const struct inode_operations gfs2_file_iops;
 extern const struct inode_operations gfs2_dir_iops;
index b404f4853034826fa0202d6e69489a19e20bf759..610613fb65b552dccfdf9853ac96d2f667bd36d4 100644 (file)
@@ -211,15 +211,16 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
 {
        struct gfs2_trans *tr, *s;
+       int oldest_tr = 1;
        int ret;
 
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
                gfs2_ail1_empty_one(sdp, tr);
-               if (list_empty(&tr->tr_ail1_list))
+               if (list_empty(&tr->tr_ail1_list) && oldest_tr)
                        list_move(&tr->tr_list, &sdp->sd_ail2_list);
                else
-                       break;
+                       oldest_tr = 0;
        }
        ret = list_empty(&sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);
@@ -317,7 +318,7 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
 
 int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 {
-       unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
+       unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
        unsigned wanted = blks + reserved_blks;
        DEFINE_WAIT(wait);
        int did_wait = 0;
@@ -545,6 +546,76 @@ void gfs2_ordered_del_inode(struct gfs2_inode *ip)
        spin_unlock(&sdp->sd_ordered_lock);
 }
 
+void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
+{
+       struct buffer_head *bh = bd->bd_bh;
+       struct gfs2_glock *gl = bd->bd_gl;
+
+       gfs2_remove_from_ail(bd);
+       bd->bd_bh = NULL;
+       bh->b_private = NULL;
+       bd->bd_blkno = bh->b_blocknr;
+       bd->bd_ops = &gfs2_revoke_lops;
+       sdp->sd_log_num_revoke++;
+       atomic_inc(&gl->gl_revokes);
+       set_bit(GLF_LFLUSH, &gl->gl_flags);
+       list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
+}
+
+void gfs2_write_revokes(struct gfs2_sbd *sdp)
+{
+       struct gfs2_trans *tr;
+       struct gfs2_bufdata *bd, *tmp;
+       int have_revokes = 0;
+       int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
+
+       gfs2_ail1_empty(sdp);
+       spin_lock(&sdp->sd_ail_lock);
+       list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
+               list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
+                       if (list_empty(&bd->bd_list)) {
+                               have_revokes = 1;
+                               goto done;
+                       }
+               }
+       }
+done:
+       spin_unlock(&sdp->sd_ail_lock);
+       if (have_revokes == 0)
+               return;
+       while (sdp->sd_log_num_revoke > max_revokes)
+               max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+       max_revokes -= sdp->sd_log_num_revoke;
+       if (!sdp->sd_log_num_revoke) {
+               atomic_dec(&sdp->sd_log_blks_free);
+               /* If no blocks have been reserved, we need to also
+                * reserve a block for the header */
+               if (!sdp->sd_log_blks_reserved)
+                       atomic_dec(&sdp->sd_log_blks_free);
+       }
+       gfs2_log_lock(sdp);
+       spin_lock(&sdp->sd_ail_lock);
+       list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
+               list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
+                       if (max_revokes == 0)
+                               goto out_of_blocks;
+                       if (!list_empty(&bd->bd_list))
+                               continue;
+                       gfs2_add_revoke(sdp, bd);
+                       max_revokes--;
+               }
+       }
+out_of_blocks:
+       spin_unlock(&sdp->sd_ail_lock);
+       gfs2_log_unlock(sdp);
+
+       if (!sdp->sd_log_num_revoke) {
+               atomic_inc(&sdp->sd_log_blks_free);
+               if (!sdp->sd_log_blks_reserved)
+                       atomic_inc(&sdp->sd_log_blks_free);
+       }
+}
+
 /**
  * log_write_header - Get and initialize a journal header buffer
  * @sdp: The GFS2 superblock
@@ -562,7 +633,6 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
        lh = page_address(page);
        clear_page(lh);
 
-       gfs2_ail1_empty(sdp);
        tail = current_tail(sdp);
 
        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
index 3566f35915e06f4abb9ecda802f5b55cc37d8dd9..37216634f0aaadc55b4b702220a3f070e4bf90b7 100644 (file)
@@ -72,5 +72,7 @@ extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
 extern void gfs2_log_shutdown(struct gfs2_sbd *sdp);
 extern void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
 extern int gfs2_logd(void *data);
+extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+extern void gfs2_write_revokes(struct gfs2_sbd *sdp);
 
 #endif /* __LOG_DOT_H__ */
index 6c33d7b6e0c4e26b6b02d74d8d5e090f26c820e9..17c5b5d7dc88c4b73e00c5918f97473df15b354f 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/bio.h>
 #include <linux/fs.h>
+#include <linux/list_sort.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -401,6 +402,20 @@ static void gfs2_check_magic(struct buffer_head *bh)
        kunmap_atomic(kaddr);
 }
 
+static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+       struct gfs2_bufdata *bda, *bdb;
+
+       bda = list_entry(a, struct gfs2_bufdata, bd_list);
+       bdb = list_entry(b, struct gfs2_bufdata, bd_list);
+
+       if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
+               return -1;
+       if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
+               return 1;
+       return 0;
+}
+
 static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
                                unsigned int total, struct list_head *blist,
                                bool is_databuf)
@@ -413,6 +428,7 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
        __be64 *ptr;
 
        gfs2_log_lock(sdp);
+       list_sort(NULL, blist, blocknr_cmp);
        bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
        while(total) {
                num = total;
@@ -590,6 +606,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
        struct page *page;
        unsigned int length;
 
+       gfs2_write_revokes(sdp);
        if (!sdp->sd_log_num_revoke)
                return;
 
@@ -836,10 +853,6 @@ const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_name = "revoke",
 };
 
-const struct gfs2_log_operations gfs2_rg_lops = {
-       .lo_name = "rg",
-};
-
 const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
@@ -851,7 +864,6 @@ const struct gfs2_log_operations gfs2_databuf_lops = {
 const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
-       &gfs2_rg_lops,
        &gfs2_revoke_lops,
        NULL,
 };
index 87e062e05c92a597587d244230140b5a09e126af..9ca2e6438419a93f0100f16e26405a4e86097922 100644 (file)
@@ -23,7 +23,6 @@
 extern const struct gfs2_log_operations gfs2_glock_lops;
 extern const struct gfs2_log_operations gfs2_buf_lops;
 extern const struct gfs2_log_operations gfs2_revoke_lops;
-extern const struct gfs2_log_operations gfs2_rg_lops;
 extern const struct gfs2_log_operations gfs2_databuf_lops;
 
 extern const struct gfs2_log_operations *gfs2_log_ops[];
index 1a89afb6847204af09957cd77349b1e7e86314d1..0da390686c08f458e12aeb44df92d7301a96d788 100644 (file)
@@ -296,10 +296,6 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
        if (bd) {
                spin_lock(&sdp->sd_ail_lock);
                if (bd->bd_tr) {
-                       gfs2_remove_from_ail(bd);
-                       bh->b_private = NULL;
-                       bd->bd_bh = NULL;
-                       bd->bd_blkno = bh->b_blocknr;
                        gfs2_trans_add_revoke(sdp, bd);
                }
                spin_unlock(&sdp->sd_ail_lock);
index 60ede2a0f43fbc498201cc3262da939110eac0bb..0262c190b6f95c6c7dec1da9a8937db4e0701724 100644 (file)
@@ -916,16 +916,16 @@ static int init_threads(struct gfs2_sbd *sdp, int undo)
                goto fail_quotad;
 
        p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
-       error = IS_ERR(p);
-       if (error) {
+       if (IS_ERR(p)) {
+               error = PTR_ERR(p);
                fs_err(sdp, "can't start logd thread: %d\n", error);
                return error;
        }
        sdp->sd_logd_process = p;
 
        p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
-       error = IS_ERR(p);
-       if (error) {
+       if (IS_ERR(p)) {
+               error = PTR_ERR(p);
                fs_err(sdp, "can't start quotad thread: %d\n", error);
                goto fail;
        }
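
The change above matters because IS_ERR() only yields a boolean, so the old code propagated 1 out of init_threads() instead of the encoded errno; PTR_ERR() extracts the real error from the error pointer. The same idiom in isolation, with example_thread_fn standing in for any kthread function:

static int example_start_thread(void *data)
{
        struct task_struct *p;

        p = kthread_run(example_thread_fn, data, "example_thread");
        if (IS_ERR(p))
                return PTR_ERR(p);      /* e.g. -ENOMEM, not the bare 1 from IS_ERR() */
        return 0;
}
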
index c253b13722e8a8e2fc08b9708da11f74fe3b270a..3768c2f40e43350f3586769e9b40974ac9138a74 100644 (file)
@@ -1154,11 +1154,6 @@ int gfs2_quota_sync(struct super_block *sb, int type)
        return error;
 }
 
-static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
-{
-       return gfs2_quota_sync(sb, type);
-}
-
 int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
 {
        struct gfs2_quota_data *qd;
@@ -1414,7 +1409,7 @@ int gfs2_quotad(void *data)
                                           &tune->gt_statfs_quantum);
 
                /* Update quota file */
-               quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
+               quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                   &quotad_timeo, &tune->gt_quota_quantum);
 
                /* Check for & recover partially truncated inodes */
index 9809156e3d044e9ca4c39dd9b58c69105f282ab0..69317435faa723c9288d390407d9343fb6bf1096 100644 (file)
@@ -1288,13 +1288,15 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
        minlen = max_t(u64, r.minlen,
                       q->limits.discard_granularity) >> bs_shift;
 
+       if (end <= start || minlen > sdp->sd_max_rg_data)
+               return -EINVAL;
+
        rgd = gfs2_blk2rgrpd(sdp, start, 0);
-       rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0);
+       rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
 
-       if (end <= start ||
-           minlen > sdp->sd_max_rg_data ||
-           start > rgd_end->rd_data0 + rgd_end->rd_data)
-               return -EINVAL;
+       if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
+           && (start > rgd_end->rd_data0 + rgd_end->rd_data))
+               return -EINVAL; /* start is beyond the end of the fs */
 
        while (1) {
 
@@ -1336,7 +1338,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
        }
 
 out:
-       r.len = trimmed << 9;
+       r.len = trimmed << bs_shift;
        if (copy_to_user(argp, &r, sizeof(r)))
                return -EFAULT;
 
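
The r.len change above scales the trimmed block count by the filesystem block-size shift rather than by 9 (512-byte sectors), so the value copied back to userspace is in bytes. The same arithmetic in isolation, assuming bs_shift is the log2 of the block size:

static inline u64 example_blocks_to_bytes(u64 nblocks, unsigned int bs_shift)
{
        return nblocks << bs_shift;     /* 4096-byte blocks: bs_shift == 12 */
}
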
index 7374907742a84ce2a1f48a0c168f5eb91c697283..2b20d7046bf353ff58381821d1fea363010f3716 100644 (file)
@@ -270,19 +270,12 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 
 void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 {
-       struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_trans *tr = current->journal_info;
 
        BUG_ON(!list_empty(&bd->bd_list));
-       BUG_ON(!list_empty(&bd->bd_ail_st_list));
-       BUG_ON(!list_empty(&bd->bd_ail_gl_list));
-       bd->bd_ops = &gfs2_revoke_lops;
+       gfs2_add_revoke(sdp, bd);
        tr->tr_touched = 1;
        tr->tr_num_revoke++;
-       sdp->sd_log_num_revoke++;
-       atomic_inc(&gl->gl_revokes);
-       set_bit(GLF_LFLUSH, &gl->gl_flags);
-       list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
 }
 
 void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
index e0101b6fb0d73c4a9036a481f5aabd6ddbe213ce..145566851e7a821fd1d87bbb9c111ce839ecf7db 100644 (file)
@@ -51,9 +51,9 @@ done:
 /*
  * hfs_readdir
  */
-static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int hfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        int len, err;
        char strbuf[HFS_MAX_NAMELEN];
@@ -62,7 +62,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        struct hfs_readdir_data *rd;
        u16 type;
 
-       if (filp->f_pos >= inode->i_size)
+       if (ctx->pos >= inode->i_size)
                return 0;
 
        err = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
@@ -73,14 +73,13 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        if (err)
                goto out;
 
-       switch ((u32)filp->f_pos) {
-       case 0:
+       if (ctx->pos == 0) {
                /* This is completely artificial... */
-               if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
+               if (!dir_emit_dot(file, ctx))
                        goto out;
-               filp->f_pos++;
-               /* fall through */
-       case 1:
+               ctx->pos = 1;
+       }
+       if (ctx->pos == 1) {
                if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
                        err = -EIO;
                        goto out;
@@ -97,18 +96,16 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                //      err = -EIO;
                //      goto out;
                //}
-               if (filldir(dirent, "..", 2, 1,
+               if (!dir_emit(ctx, "..", 2,
                            be32_to_cpu(entry.thread.ParID), DT_DIR))
                        goto out;
-               filp->f_pos++;
-               /* fall through */
-       default:
-               if (filp->f_pos >= inode->i_size)
-                       goto out;
-               err = hfs_brec_goto(&fd, filp->f_pos - 1);
-               if (err)
-                       goto out;
+               ctx->pos = 2;
        }
+       if (ctx->pos >= inode->i_size)
+               goto out;
+       err = hfs_brec_goto(&fd, ctx->pos - 1);
+       if (err)
+               goto out;
 
        for (;;) {
                if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) {
@@ -131,7 +128,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                err = -EIO;
                                goto out;
                        }
-                       if (filldir(dirent, strbuf, len, filp->f_pos,
+                       if (!dir_emit(ctx, strbuf, len,
                                    be32_to_cpu(entry.dir.DirID), DT_DIR))
                                break;
                } else if (type == HFS_CDR_FIL) {
@@ -140,7 +137,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                err = -EIO;
                                goto out;
                        }
-                       if (filldir(dirent, strbuf, len, filp->f_pos,
+                       if (!dir_emit(ctx, strbuf, len,
                                    be32_to_cpu(entry.file.FlNum), DT_REG))
                                break;
                } else {
@@ -148,22 +145,22 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        err = -EIO;
                        goto out;
                }
-               filp->f_pos++;
-               if (filp->f_pos >= inode->i_size)
+               ctx->pos++;
+               if (ctx->pos >= inode->i_size)
                        goto out;
                err = hfs_brec_goto(&fd, 1);
                if (err)
                        goto out;
        }
-       rd = filp->private_data;
+       rd = file->private_data;
        if (!rd) {
                rd = kmalloc(sizeof(struct hfs_readdir_data), GFP_KERNEL);
                if (!rd) {
                        err = -ENOMEM;
                        goto out;
                }
-               filp->private_data = rd;
-               rd->file = filp;
+               file->private_data = rd;
+               rd->file = file;
                list_add(&rd->list, &HFS_I(inode)->open_dir_list);
        }
        memcpy(&rd->key, &fd.key, sizeof(struct hfs_cat_key));
@@ -306,7 +303,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 const struct file_operations hfs_dir_operations = {
        .read           = generic_read_dir,
-       .readdir        = hfs_readdir,
+       .iterate        = hfs_readdir,
        .llseek         = generic_file_llseek,
        .release        = hfs_dir_release,
 };
index a37ac934732f6707bbf6a0825f9d04d179c76743..d8ce4bd17fc5f43058eaae416532b2e4019cc870 100644 (file)
@@ -121,9 +121,9 @@ fail:
        return ERR_PTR(err);
 }
 
-static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int hfsplus_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        int len, err;
        char strbuf[HFSPLUS_MAX_STRLEN + 1];
@@ -132,7 +132,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
        struct hfsplus_readdir_data *rd;
        u16 type;
 
-       if (filp->f_pos >= inode->i_size)
+       if (file->f_pos >= inode->i_size)
                return 0;
 
        err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
@@ -143,14 +143,13 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
        if (err)
                goto out;
 
-       switch ((u32)filp->f_pos) {
-       case 0:
+       if (ctx->pos == 0) {
                /* This is completely artificial... */
-               if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
+               if (!dir_emit_dot(file, ctx))
                        goto out;
-               filp->f_pos++;
-               /* fall through */
-       case 1:
+               ctx->pos = 1;
+       }
+       if (ctx->pos == 1) {
                if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
                        err = -EIO;
                        goto out;
@@ -168,19 +167,16 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        err = -EIO;
                        goto out;
                }
-               if (filldir(dirent, "..", 2, 1,
+               if (!dir_emit(ctx, "..", 2,
                            be32_to_cpu(entry.thread.parentID), DT_DIR))
                        goto out;
-               filp->f_pos++;
-               /* fall through */
-       default:
-               if (filp->f_pos >= inode->i_size)
-                       goto out;
-               err = hfs_brec_goto(&fd, filp->f_pos - 1);
-               if (err)
-                       goto out;
+               ctx->pos = 2;
        }
-
+       if (ctx->pos >= inode->i_size)
+               goto out;
+       err = hfs_brec_goto(&fd, ctx->pos - 1);
+       if (err)
+               goto out;
        for (;;) {
                if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
                        pr_err("walked past end of dir\n");
@@ -211,7 +207,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                            HFSPLUS_SB(sb)->hidden_dir->i_ino ==
                                        be32_to_cpu(entry.folder.id))
                                goto next;
-                       if (filldir(dirent, strbuf, len, filp->f_pos,
+                       if (!dir_emit(ctx, strbuf, len,
                                    be32_to_cpu(entry.folder.id), DT_DIR))
                                break;
                } else if (type == HFSPLUS_FILE) {
@@ -220,7 +216,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                err = -EIO;
                                goto out;
                        }
-                       if (filldir(dirent, strbuf, len, filp->f_pos,
+                       if (!dir_emit(ctx, strbuf, len,
                                    be32_to_cpu(entry.file.id), DT_REG))
                                break;
                } else {
@@ -229,22 +225,22 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        goto out;
                }
 next:
-               filp->f_pos++;
-               if (filp->f_pos >= inode->i_size)
+               ctx->pos++;
+               if (ctx->pos >= inode->i_size)
                        goto out;
                err = hfs_brec_goto(&fd, 1);
                if (err)
                        goto out;
        }
-       rd = filp->private_data;
+       rd = file->private_data;
        if (!rd) {
                rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL);
                if (!rd) {
                        err = -ENOMEM;
                        goto out;
                }
-               filp->private_data = rd;
-               rd->file = filp;
+               file->private_data = rd;
+               rd->file = file;
                list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
        }
        memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
@@ -538,7 +534,7 @@ const struct inode_operations hfsplus_dir_inode_operations = {
 const struct file_operations hfsplus_dir_operations = {
        .fsync          = hfsplus_file_fsync,
        .read           = generic_read_dir,
-       .readdir        = hfsplus_readdir,
+       .iterate        = hfsplus_readdir,
        .unlocked_ioctl = hfsplus_ioctl,
        .llseek         = generic_file_llseek,
        .release        = hfsplus_dir_release,
index 32f35f18798902de1eb9b26d320dfe042cad7ff4..cddb0521751278526dfc1b678acbf708a906a815 100644 (file)
@@ -277,7 +277,7 @@ static const struct super_operations hostfs_sbops = {
        .show_options   = hostfs_show_options,
 };
 
-int hostfs_readdir(struct file *file, void *ent, filldir_t filldir)
+int hostfs_readdir(struct file *file, struct dir_context *ctx)
 {
        void *dir;
        char *name;
@@ -292,12 +292,11 @@ int hostfs_readdir(struct file *file, void *ent, filldir_t filldir)
        __putname(name);
        if (dir == NULL)
                return -error;
-       next = file->f_pos;
+       next = ctx->pos;
        while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) {
-               error = (*filldir)(ent, name, len, file->f_pos,
-                                  ino, type);
-               if (error) break;
-               file->f_pos = next;
+               if (!dir_emit(ctx, name, len, ino, type))
+                       break;
+               ctx->pos = next;
        }
        close_dir(dir);
        return 0;
@@ -393,7 +392,7 @@ static const struct file_operations hostfs_file_fops = {
 
 static const struct file_operations hostfs_dir_fops = {
        .llseek         = generic_file_llseek,
-       .readdir        = hostfs_readdir,
+       .iterate        = hostfs_readdir,
        .read           = generic_read_dir,
 };
 
index 834ac13c04b7976442a72b8c7ee20fa3d8b443b2..292b1acb9b817abb65d71665356e52a30405ab91 100644 (file)
@@ -57,14 +57,14 @@ fail:
        return -ESPIPE;
 }
 
-static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int hpfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
        struct quad_buffer_head qbh;
        struct hpfs_dirent *de;
        int lc;
-       long old_pos;
+       loff_t next_pos;
        unsigned char *tempname;
        int c1, c2 = 0;
        int ret = 0;
@@ -105,11 +105,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                }
        }
        lc = hpfs_sb(inode->i_sb)->sb_lowercase;
-       if (filp->f_pos == 12) { /* diff -r requires this (note, that diff -r */
-               filp->f_pos = 13; /* also fails on msdos filesystem in 2.0) */
+       if (ctx->pos == 12) { /* diff -r requires this (note, that diff -r */
+               ctx->pos = 13; /* also fails on msdos filesystem in 2.0) */
                goto out;
        }
-       if (filp->f_pos == 13) {
+       if (ctx->pos == 13) {
                ret = -ENOENT;
                goto out;
        }
@@ -120,33 +120,34 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                   accepted by filldir, but what can I do?
                   maybe killall -9 ls helps */
                if (hpfs_sb(inode->i_sb)->sb_chk)
-                       if (hpfs_stop_cycles(inode->i_sb, filp->f_pos, &c1, &c2, "hpfs_readdir")) {
+                       if (hpfs_stop_cycles(inode->i_sb, ctx->pos, &c1, &c2, "hpfs_readdir")) {
                                ret = -EFSERROR;
                                goto out;
                        }
-               if (filp->f_pos == 12)
+               if (ctx->pos == 12)
                        goto out;
-               if (filp->f_pos == 3 || filp->f_pos == 4 || filp->f_pos == 5) {
-                       printk("HPFS: warning: pos==%d\n",(int)filp->f_pos);
+               if (ctx->pos == 3 || ctx->pos == 4 || ctx->pos == 5) {
+                       printk("HPFS: warning: pos==%d\n",(int)ctx->pos);
                        goto out;
                }
-               if (filp->f_pos == 0) {
-                       if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0)
+               if (ctx->pos == 0) {
+                       if (!dir_emit_dot(file, ctx))
                                goto out;
-                       filp->f_pos = 11;
+                       ctx->pos = 11;
                }
-               if (filp->f_pos == 11) {
-                       if (filldir(dirent, "..", 2, filp->f_pos, hpfs_inode->i_parent_dir, DT_DIR) < 0)
+               if (ctx->pos == 11) {
+                       if (!dir_emit(ctx, "..", 2, hpfs_inode->i_parent_dir, DT_DIR))
                                goto out;
-                       filp->f_pos = 1;
+                       ctx->pos = 1;
                }
-               if (filp->f_pos == 1) {
-                       filp->f_pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
-                       hpfs_add_pos(inode, &filp->f_pos);
-                       filp->f_version = inode->i_version;
+               if (ctx->pos == 1) {
+                       ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
+                       hpfs_add_pos(inode, &file->f_pos);
+                       file->f_version = inode->i_version;
                }
-               old_pos = filp->f_pos;
-               if (!(de = map_pos_dirent(inode, &filp->f_pos, &qbh))) {
+               next_pos = ctx->pos;
+               if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) {
+                       ctx->pos = next_pos;
                        ret = -EIOERROR;
                        goto out;
                }
@@ -154,20 +155,21 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        if (hpfs_sb(inode->i_sb)->sb_chk) {
                                if (de->first && !de->last && (de->namelen != 2
                                    || de ->name[0] != 1 || de->name[1] != 1))
-                                       hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos);
+                                       hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", (unsigned long)ctx->pos);
                                if (de->last && (de->namelen != 1 || de ->name[0] != 255))
-                                       hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos);
+                                       hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", (unsigned long)ctx->pos);
                        }
                        hpfs_brelse4(&qbh);
+                       ctx->pos = next_pos;
                        goto again;
                }
                tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
-               if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) {
-                       filp->f_pos = old_pos;
+               if (!dir_emit(ctx, tempname, de->namelen, le32_to_cpu(de->fnode), DT_UNKNOWN)) {
                        if (tempname != de->name) kfree(tempname);
                        hpfs_brelse4(&qbh);
                        goto out;
                }
+               ctx->pos = next_pos;
                if (tempname != de->name) kfree(tempname);
                hpfs_brelse4(&qbh);
        }
@@ -322,7 +324,7 @@ const struct file_operations hpfs_dir_ops =
 {
        .llseek         = hpfs_dir_lseek,
        .read           = generic_read_dir,
-       .readdir        = hpfs_readdir,
+       .iterate        = hpfs_readdir,
        .release        = hpfs_dir_release,
        .fsync          = hpfs_file_fsync,
 };
index cd3e38972c86a8ae27ed8abfcb90e8466eac3490..fc90ab11c34049e6431e0b9dcf220040cfbbb146 100644 (file)
@@ -542,8 +542,8 @@ static const struct file_operations hppfs_file_fops = {
 };
 
 struct hppfs_dirent {
-       void *vfs_dirent;
-       filldir_t filldir;
+       struct dir_context ctx;
+       struct dir_context *caller;
        struct dentry *dentry;
 };
 
@@ -555,34 +555,29 @@ static int hppfs_filldir(void *d, const char *name, int size,
        if (file_removed(dirent->dentry, name))
                return 0;
 
-       return (*dirent->filldir)(dirent->vfs_dirent, name, size, offset,
-                                 inode, type);
+       dirent->caller->pos = dirent->ctx.pos;
+       return !dir_emit(dirent->caller, name, size, inode, type);
 }
 
-static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir)
+static int hppfs_readdir(struct file *file, struct dir_context *ctx)
 {
        struct hppfs_private *data = file->private_data;
        struct file *proc_file = data->proc_file;
-       int (*readdir)(struct file *, void *, filldir_t);
-       struct hppfs_dirent dirent = ((struct hppfs_dirent)
-                                     { .vfs_dirent     = ent,
-                                       .filldir        = filldir,
-                                       .dentry         = file->f_path.dentry
-                                     });
+       struct hppfs_dirent d = {
+               .ctx.actor      = hppfs_filldir,
+               .caller         = ctx,
+               .dentry         = file->f_path.dentry
+       };
        int err;
-
-       readdir = file_inode(proc_file)->i_fop->readdir;
-
-       proc_file->f_pos = file->f_pos;
-       err = (*readdir)(proc_file, &dirent, hppfs_filldir);
-       file->f_pos = proc_file->f_pos;
-
+       proc_file->f_pos = ctx->pos;
+       err = iterate_dir(proc_file, &d.ctx);
+       ctx->pos = d.ctx.pos;
        return err;
 }
 
 static const struct file_operations hppfs_dir_fops = {
        .owner          = NULL,
-       .readdir        = hppfs_readdir,
+       .iterate        = hppfs_readdir,
        .open           = hppfs_dir_open,
        .llseek         = default_llseek,
        .release        = hppfs_release,
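
hppfs forwards entries from the underlying procfs directory into its caller's context, filtering out files it has removed. The general shape of that double-context trick, reduced to essentials; the filter rule and all names here are illustrative:

struct example_filter {
        struct dir_context ctx;         /* our actor plus private position */
        struct dir_context *caller;     /* the context we forward into */
};

static int example_filter_actor(void *opaque, const char *name, int size,
                                loff_t offset, u64 ino, unsigned int type)
{
        struct example_filter *d = opaque;

        if (size > 2 && name[0] == '.')
                return 0;               /* illustrative rule: hide dot-files */
        d->caller->pos = d->ctx.pos;
        return !dir_emit(d->caller, name, size, ino, type);
}

static int example_filtered_readdir(struct file *lower, struct dir_context *ctx)
{
        struct example_filter d = {
                .ctx.actor      = example_filter_actor,
                .caller         = ctx,
        };
        int err;

        lower->f_pos = ctx->pos;
        err = iterate_dir(lower, &d.ctx);
        ctx->pos = d.ctx.pos;
        return err;
}
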
index eaa75f75b625c572ec6e75650b3e6fb89d715a4a..68121584ae37d40a074bf81e55184ecb1674d882 100644 (file)
@@ -131,6 +131,12 @@ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
  */
 extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
 
+/*
+ * splice.c
+ */
+extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+               loff_t *opos, size_t len, unsigned int flags);
+
 /*
  * pipe.c
  */
index a7d5c3c3d4e63a1638eb53bb04af666b388844c5..b943cbd963bb98c9ba89edc814cfe4c3223e8c3a 100644 (file)
@@ -78,8 +78,8 @@ int get_acorn_filename(struct iso_directory_record *de,
 /*
  * This should _really_ be cleaned up some day..
  */
-static int do_isofs_readdir(struct inode *inode, struct file *filp,
-               void *dirent, filldir_t filldir,
+static int do_isofs_readdir(struct inode *inode, struct file *file,
+               struct dir_context *ctx,
                char *tmpname, struct iso_directory_record *tmpde)
 {
        unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
@@ -94,10 +94,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
        struct iso_directory_record *de;
        struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
 
-       offset = filp->f_pos & (bufsize - 1);
-       block = filp->f_pos >> bufbits;
+       offset = ctx->pos & (bufsize - 1);
+       block = ctx->pos >> bufbits;
 
-       while (filp->f_pos < inode->i_size) {
+       while (ctx->pos < inode->i_size) {
                int de_len;
 
                if (!bh) {
@@ -108,7 +108,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
 
                de = (struct iso_directory_record *) (bh->b_data + offset);
 
-               de_len = *(unsigned char *) de;
+               de_len = *(unsigned char *)de;
 
                /*
                 * If the length byte is zero, we should move on to the next
@@ -119,8 +119,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
                if (de_len == 0) {
                        brelse(bh);
                        bh = NULL;
-                       filp->f_pos = (filp->f_pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1);
-                       block = filp->f_pos >> bufbits;
+                       ctx->pos = (ctx->pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1);
+                       block = ctx->pos >> bufbits;
                        offset = 0;
                        continue;
                }
@@ -164,16 +164,16 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
 
                if (de->flags[-sbi->s_high_sierra] & 0x80) {
                        first_de = 0;
-                       filp->f_pos += de_len;
+                       ctx->pos += de_len;
                        continue;
                }
                first_de = 1;
 
                /* Handle the case of the '.' directory */
                if (de->name_len[0] == 1 && de->name[0] == 0) {
-                       if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0)
+                       if (!dir_emit_dot(file, ctx))
                                break;
-                       filp->f_pos += de_len;
+                       ctx->pos += de_len;
                        continue;
                }
 
@@ -181,10 +181,9 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
 
                /* Handle the case of the '..' directory */
                if (de->name_len[0] == 1 && de->name[0] == 1) {
-                       inode_number = parent_ino(filp->f_path.dentry);
-                       if (filldir(dirent, "..", 2, filp->f_pos, inode_number, DT_DIR) < 0)
+                       if (!dir_emit_dotdot(file, ctx))
                                break;
-                       filp->f_pos += de_len;
+                       ctx->pos += de_len;
                        continue;
                }
 
@@ -198,7 +197,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
                if ((sbi->s_hide && (de->flags[-sbi->s_high_sierra] & 1)) ||
                    (!sbi->s_showassoc &&
                                (de->flags[-sbi->s_high_sierra] & 4))) {
-                       filp->f_pos += de_len;
+                       ctx->pos += de_len;
                        continue;
                }
 
@@ -230,10 +229,10 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
                        }
                }
                if (len > 0) {
-                       if (filldir(dirent, p, len, filp->f_pos, inode_number, DT_UNKNOWN) < 0)
+                       if (!dir_emit(ctx, p, len, inode_number, DT_UNKNOWN))
                                break;
                }
-               filp->f_pos += de_len;
+               ctx->pos += de_len;
 
                continue;
        }
@@ -247,13 +246,12 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
  * handling split directory entries.. The real work is done by
  * "do_isofs_readdir()".
  */
-static int isofs_readdir(struct file *filp,
-               void *dirent, filldir_t filldir)
+static int isofs_readdir(struct file *file, struct dir_context *ctx)
 {
        int result;
        char *tmpname;
        struct iso_directory_record *tmpde;
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
 
        tmpname = (char *)__get_free_page(GFP_KERNEL);
        if (tmpname == NULL)
@@ -261,7 +259,7 @@ static int isofs_readdir(struct file *filp,
 
        tmpde = (struct iso_directory_record *) (tmpname+1024);
 
-       result = do_isofs_readdir(inode, filp, dirent, filldir, tmpname, tmpde);
+       result = do_isofs_readdir(inode, file, ctx, tmpname, tmpde);
 
        free_page((unsigned long) tmpname);
        return result;
@@ -271,7 +269,7 @@ const struct file_operations isofs_dir_operations =
 {
        .llseek = generic_file_llseek,
        .read = generic_read_dir,
-       .readdir = isofs_readdir,
+       .iterate = isofs_readdir,
 };
 
 /*
index e3e255c0a50968cd41efe9f9b9706290875f47fd..be0c39b66fe082402435673cab806426ba3a165e 100644 (file)
@@ -2019,16 +2019,20 @@ zap_buffer_unlocked:
  * void journal_invalidatepage() - invalidate a journal page
  * @journal: journal to use for flush
  * @page:    page to flush
- * @offset:  length of page to invalidate.
+ * @offset:  offset of the range to invalidate
+ * @length:  length of the range to invalidate
  *
- * Reap page buffers containing data after offset in page.
+ * Reap page buffers containing data in specified range in page.
  */
 void journal_invalidatepage(journal_t *journal,
                      struct page *page,
-                     unsigned long offset)
+                     unsigned int offset,
+                     unsigned int length)
 {
        struct buffer_head *head, *bh, *next;
+       unsigned int stop = offset + length;
        unsigned int curr_off = 0;
+       int partial_page = (offset || length < PAGE_CACHE_SIZE);
        int may_free = 1;
 
        if (!PageLocked(page))
@@ -2036,6 +2040,8 @@ void journal_invalidatepage(journal_t *journal,
        if (!page_has_buffers(page))
                return;
 
+       BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
         * cautious in our locking. */
@@ -2045,11 +2051,14 @@ void journal_invalidatepage(journal_t *journal,
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;
 
+               if (next_off > stop)
+                       return;
+
                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
                        may_free &= journal_unmap_buffer(journal, bh,
-                                                        offset > 0);
+                                                        partial_page);
                        unlock_buffer(bh);
                }
                curr_off = next_off;
@@ -2057,7 +2066,7 @@ void journal_invalidatepage(journal_t *journal,
 
        } while (bh != head);
 
-       if (!offset) {
+       if (!partial_page) {
                if (may_free && try_to_free_buffers(page))
                        J_ASSERT(!page_has_buffers(page));
        }
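
journal_invalidatepage() now takes an offset/length pair rather than a single truncation offset, matching the 3.11 ->invalidatepage prototype, and it frees the page's buffers only when the whole page is invalidated (partial_page == 0). Assuming that prototype, a filesystem hook that forwards to it might look like the sketch below; the myfs_* names and struct are illustrative, not part of this merge.

#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/mm.h>

/* Illustrative per-superblock info; real filesystems keep much more here. */
struct myfs_sb_info {
	journal_t *s_journal;
};

static inline struct myfs_sb_info *MYFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/* address_space ->invalidatepage hook, 3.11 offset/length prototype. */
static void myfs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	journal_t *journal = MYFS_SB(page->mapping->host->i_sb)->s_journal;

	/* offset != 0 or length < PAGE_CACHE_SIZE is a partial invalidate:
	 * buffers past offset + length stay untouched and the page's
	 * buffers are not freed. */
	journal_invalidatepage(journal, page, offset, length);
}
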
index 69a48c2944da682c8a133fe75183c086ef08813b..5a9f5534d57be626cd4d723ee93aec6375faac62 100644 (file)
@@ -20,7 +20,7 @@ config JBD2
 
 config JBD2_DEBUG
        bool "JBD2 (ext4) debugging support"
-       depends on JBD2 && DEBUG_FS
+       depends on JBD2
        help
          If you are using the ext4 journaled file system (or
          potentially any other filesystem/device using JBD2), this option
@@ -29,7 +29,7 @@ config JBD2_DEBUG
          By default, the debugging output will be turned off.
 
          If you select Y here, then you will be able to turn on debugging
-         with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a
+         with "echo N > /sys/module/jbd2/parameters/jbd2_debug", where N is a
          number between 1 and 5. The higher the number, the more debugging
          output is generated.  To turn debugging off again, do
-         "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug".
+         "echo 0 > /sys/module/jbd2/parameters/jbd2_debug".
index c78841ee81cf31a0c4c67a23bca294a32720cb5f..7f34f4716165311f66b62b81dd25a32e899dcf80 100644 (file)
@@ -120,8 +120,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
        int nblocks, space_left;
        /* assert_spin_locked(&journal->j_state_lock); */
 
-       nblocks = jbd_space_needed(journal);
-       while (__jbd2_log_space_left(journal) < nblocks) {
+       nblocks = jbd2_space_needed(journal);
+       while (jbd2_log_space_left(journal) < nblocks) {
                if (journal->j_flags & JBD2_ABORT)
                        return;
                write_unlock(&journal->j_state_lock);
@@ -140,8 +140,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
                 */
                write_lock(&journal->j_state_lock);
                spin_lock(&journal->j_list_lock);
-               nblocks = jbd_space_needed(journal);
-               space_left = __jbd2_log_space_left(journal);
+               nblocks = jbd2_space_needed(journal);
+               space_left = jbd2_log_space_left(journal);
                if (space_left < nblocks) {
                        int chkpt = journal->j_checkpoint_transactions != NULL;
                        tid_t tid = 0;
@@ -156,7 +156,15 @@ void __jbd2_log_wait_for_space(journal_t *journal)
                                /* We were able to recover space; yay! */
                                ;
                        } else if (tid) {
+                               /*
+                                * jbd2_journal_commit_transaction() may want
+                                * to take the checkpoint_mutex if JBD2_FLUSHED
+                                * is set.  So we need to temporarily drop it.
+                                */
+                               mutex_unlock(&journal->j_checkpoint_mutex);
                                jbd2_log_wait_commit(journal, tid);
+                               write_lock(&journal->j_state_lock);
+                               continue;
                        } else {
                                printk(KERN_ERR "%s: needed %d blocks and "
                                       "only had %d space available\n",
@@ -625,10 +633,6 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
 
        __jbd2_journal_drop_transaction(journal, transaction);
        jbd2_journal_free_transaction(transaction);
-
-       /* Just in case anybody was waiting for more transactions to be
-           checkpointed... */
-       wake_up(&journal->j_wait_logspace);
        ret = 1;
 out:
        return ret;
@@ -690,9 +694,7 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
        J_ASSERT(transaction->t_state == T_FINISHED);
        J_ASSERT(transaction->t_buffers == NULL);
        J_ASSERT(transaction->t_forget == NULL);
-       J_ASSERT(transaction->t_iobuf_list == NULL);
        J_ASSERT(transaction->t_shadow_list == NULL);
-       J_ASSERT(transaction->t_log_list == NULL);
        J_ASSERT(transaction->t_checkpoint_list == NULL);
        J_ASSERT(transaction->t_checkpoint_io_list == NULL);
        J_ASSERT(atomic_read(&transaction->t_updates) == 0);
index 0f53946f13c15d5ab53a5f46c595d2ae9eb3f652..559bec1a37b429bed7f639866fbadb6c148b10a0 100644 (file)
 #include <trace/events/jbd2.h>
 
 /*
- * Default IO end handler for temporary BJ_IO buffer_heads.
+ * IO end handler for temporary buffer_heads handling writes to the journal.
  */
 static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
 {
+       struct buffer_head *orig_bh = bh->b_private;
+
        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
+       if (orig_bh) {
+               clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
+               smp_mb__after_clear_bit();
+               wake_up_bit(&orig_bh->b_state, BH_Shadow);
+       }
        unlock_buffer(bh);
 }
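
journal_end_buffer_io_sync() now also clears BH_Shadow on the buffer the temporary copy was made from and wakes any bit-waiters, so a task that wants to modify a shadowed buffer only has to sleep on that bit. Assuming the 3.11-era wait_on_bit() interface, which still takes an action callback, the waiter side plausibly reduces to something like the following; sleep_on_shadow_bh and wait_for_shadow are illustrative names.

#include <linux/buffer_head.h>
#include <linux/jbd2.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Action callback for wait_on_bit(): give up the CPU while the journal
 * IO on the shadow copy is in flight. */
static int sleep_on_shadow_bh(void *word)
{
	io_schedule();
	return 0;
}

/* Block until the commit code's end_io handler clears BH_Shadow on @bh. */
static void wait_for_shadow(struct buffer_head *bh)
{
	wait_on_bit(&bh->b_state, BH_Shadow, sleep_on_shadow_bh,
		    TASK_UNINTERRUPTIBLE);
}
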
 
@@ -85,8 +92,7 @@ nope:
        __brelse(bh);
 }
 
-static void jbd2_commit_block_csum_set(journal_t *j,
-                                      struct journal_head *descriptor)
+static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
 {
        struct commit_header *h;
        __u32 csum;
@@ -94,12 +100,11 @@ static void jbd2_commit_block_csum_set(journal_t *j,
        if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
                return;
 
-       h = (struct commit_header *)(jh2bh(descriptor)->b_data);
+       h = (struct commit_header *)(bh->b_data);
        h->h_chksum_type = 0;
        h->h_chksum_size = 0;
        h->h_chksum[0] = 0;
-       csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
-                          j->j_blocksize);
+       csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
        h->h_chksum[0] = cpu_to_be32(csum);
 }
 
@@ -116,7 +121,6 @@ static int journal_submit_commit_record(journal_t *journal,
                                        struct buffer_head **cbh,
                                        __u32 crc32_sum)
 {
-       struct journal_head *descriptor;
        struct commit_header *tmp;
        struct buffer_head *bh;
        int ret;
@@ -127,12 +131,10 @@ static int journal_submit_commit_record(journal_t *journal,
        if (is_journal_aborted(journal))
                return 0;
 
-       descriptor = jbd2_journal_get_descriptor_buffer(journal);
-       if (!descriptor)
+       bh = jbd2_journal_get_descriptor_buffer(journal);
+       if (!bh)
                return 1;
 
-       bh = jh2bh(descriptor);
-
        tmp = (struct commit_header *)bh->b_data;
        tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
        tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
@@ -146,9 +148,9 @@ static int journal_submit_commit_record(journal_t *journal,
                tmp->h_chksum_size      = JBD2_CRC32_CHKSUM_SIZE;
                tmp->h_chksum[0]        = cpu_to_be32(crc32_sum);
        }
-       jbd2_commit_block_csum_set(journal, descriptor);
+       jbd2_commit_block_csum_set(journal, bh);
 
-       JBUFFER_TRACE(descriptor, "submit commit block");
+       BUFFER_TRACE(bh, "submit commit block");
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
@@ -180,7 +182,6 @@ static int journal_wait_on_commit_record(journal_t *journal,
        if (unlikely(!buffer_uptodate(bh)))
                ret = -EIO;
        put_bh(bh);            /* One for getblk() */
-       jbd2_journal_put_journal_head(bh2jh(bh));
 
        return ret;
 }
@@ -321,7 +322,7 @@ static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
 }
 
 static void jbd2_descr_block_csum_set(journal_t *j,
-                                     struct journal_head *descriptor)
+                                     struct buffer_head *bh)
 {
        struct jbd2_journal_block_tail *tail;
        __u32 csum;
@@ -329,12 +330,10 @@ static void jbd2_descr_block_csum_set(journal_t *j,
        if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
                return;
 
-       tail = (struct jbd2_journal_block_tail *)
-                       (jh2bh(descriptor)->b_data + j->j_blocksize -
+       tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
                        sizeof(struct jbd2_journal_block_tail));
        tail->t_checksum = 0;
-       csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
-                          j->j_blocksize);
+       csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
        tail->t_checksum = cpu_to_be32(csum);
 }
 
@@ -343,20 +342,21 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
 {
        struct page *page = bh->b_page;
        __u8 *addr;
-       __u32 csum;
+       __u32 csum32;
 
        if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
                return;
 
        sequence = cpu_to_be32(sequence);
        addr = kmap_atomic(page);
-       csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
-                         sizeof(sequence));
-       csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
-                         bh->b_size);
+       csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+                            sizeof(sequence));
+       csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
+                            bh->b_size);
        kunmap_atomic(addr);
 
-       tag->t_checksum = cpu_to_be32(csum);
+       /* We only have space to store the lower 16 bits of the crc32c. */
+       tag->t_checksum = cpu_to_be16(csum32);
 }
 /*
  * jbd2_journal_commit_transaction
@@ -368,7 +368,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 {
        struct transaction_stats_s stats;
        transaction_t *commit_transaction;
-       struct journal_head *jh, *new_jh, *descriptor;
+       struct journal_head *jh;
+       struct buffer_head *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int bufs;
        int flags;
@@ -392,6 +393,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        tid_t first_tid;
        int update_tail;
        int csum_size = 0;
+       LIST_HEAD(io_bufs);
+       LIST_HEAD(log_bufs);
 
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
                csum_size = sizeof(struct jbd2_journal_block_tail);
@@ -424,13 +427,13 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        J_ASSERT(journal->j_committing_transaction == NULL);
 
        commit_transaction = journal->j_running_transaction;
-       J_ASSERT(commit_transaction->t_state == T_RUNNING);
 
        trace_jbd2_start_commit(journal, commit_transaction);
        jbd_debug(1, "JBD2: starting commit of transaction %d\n",
                        commit_transaction->t_tid);
 
        write_lock(&journal->j_state_lock);
+       J_ASSERT(commit_transaction->t_state == T_RUNNING);
        commit_transaction->t_state = T_LOCKED;
 
        trace_jbd2_commit_locking(journal, commit_transaction);
@@ -520,6 +523,12 @@ void jbd2_journal_commit_transaction(journal_t *journal)
         */
        jbd2_journal_switch_revoke_table(journal);
 
+       /*
+        * Reserved credits cannot be claimed anymore, free them
+        */
+       atomic_sub(atomic_read(&journal->j_reserved_credits),
+                  &commit_transaction->t_outstanding_credits);
+
        trace_jbd2_commit_flushing(journal, commit_transaction);
        stats.run.rs_flushing = jiffies;
        stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
@@ -533,7 +542,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        wake_up(&journal->j_wait_transaction_locked);
        write_unlock(&journal->j_state_lock);
 
-       jbd_debug(3, "JBD2: commit phase 2\n");
+       jbd_debug(3, "JBD2: commit phase 2a\n");
 
        /*
         * Now start flushing things to disk, in the order they appear
@@ -545,10 +554,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
        blk_start_plug(&plug);
        jbd2_journal_write_revoke_records(journal, commit_transaction,
-                                         WRITE_SYNC);
+                                         &log_bufs, WRITE_SYNC);
        blk_finish_plug(&plug);
 
-       jbd_debug(3, "JBD2: commit phase 2\n");
+       jbd_debug(3, "JBD2: commit phase 2b\n");
 
        /*
         * Way to go: we have now written out all of the data for a
@@ -571,8 +580,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                 atomic_read(&commit_transaction->t_outstanding_credits));
 
        err = 0;
-       descriptor = NULL;
        bufs = 0;
+       descriptor = NULL;
        blk_start_plug(&plug);
        while (commit_transaction->t_buffers) {
 
@@ -604,8 +613,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                   record the metadata buffer. */
 
                if (!descriptor) {
-                       struct buffer_head *bh;
-
                        J_ASSERT (bufs == 0);
 
                        jbd_debug(4, "JBD2: get descriptor\n");
@@ -616,26 +623,26 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                                continue;
                        }
 
-                       bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
-                               (unsigned long long)bh->b_blocknr, bh->b_data);
-                       header = (journal_header_t *)&bh->b_data[0];
+                               (unsigned long long)descriptor->b_blocknr,
+                               descriptor->b_data);
+                       header = (journal_header_t *)descriptor->b_data;
                        header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
                        header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
                        header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);
 
-                       tagp = &bh->b_data[sizeof(journal_header_t)];
-                       space_left = bh->b_size - sizeof(journal_header_t);
+                       tagp = &descriptor->b_data[sizeof(journal_header_t)];
+                       space_left = descriptor->b_size -
+                                               sizeof(journal_header_t);
                        first_tag = 1;
-                       set_buffer_jwrite(bh);
-                       set_buffer_dirty(bh);
-                       wbuf[bufs++] = bh;
+                       set_buffer_jwrite(descriptor);
+                       set_buffer_dirty(descriptor);
+                       wbuf[bufs++] = descriptor;
 
                        /* Record it so that we can wait for IO
                            completion later */
-                       BUFFER_TRACE(bh, "ph3: file as descriptor");
-                       jbd2_journal_file_buffer(descriptor, commit_transaction,
-                                       BJ_LogCtl);
+                       BUFFER_TRACE(descriptor, "ph3: file as descriptor");
+                       jbd2_file_log_bh(&log_bufs, descriptor);
                }
 
                /* Where is the buffer to be written? */
@@ -658,29 +665,22 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
                /* Bump b_count to prevent truncate from stumbling over
                    the shadowed buffer!  @@@ This can go if we ever get
-                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
+                   rid of the shadow pairing of buffers. */
                atomic_inc(&jh2bh(jh)->b_count);
 
-               /* Make a temporary IO buffer with which to write it out
-                   (this will requeue both the metadata buffer and the
-                   temporary IO buffer). new_bh goes on BJ_IO*/
-
-               set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                /*
-                * akpm: jbd2_journal_write_metadata_buffer() sets
-                * new_bh->b_transaction to commit_transaction.
-                * We need to clean this up before we release new_bh
-                * (which is of type BJ_IO)
+                * Make a temporary IO buffer with which to write it out
+                * (this will requeue the metadata buffer to BJ_Shadow).
                 */
+               set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = jbd2_journal_write_metadata_buffer(commit_transaction,
-                                                     jh, &new_jh, blocknr);
+                                               jh, &wbuf[bufs], blocknr);
                if (flags < 0) {
                        jbd2_journal_abort(journal, flags);
                        continue;
                }
-               set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
-               wbuf[bufs++] = jh2bh(new_jh);
+               jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
 
                /* Record the new block's tag in the current descriptor
                    buffer */
@@ -694,10 +694,11 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                tag = (journal_block_tag_t *) tagp;
                write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be16(tag_flag);
-               jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh),
+               jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
                                        commit_transaction->t_tid);
                tagp += tag_bytes;
                space_left -= tag_bytes;
+               bufs++;
 
                if (first_tag) {
                        memcpy (tagp, journal->j_uuid, 16);
@@ -809,7 +810,7 @@ start_journal_io:
            the log.  Before we can commit it, wait for the IO so far to
            complete.  Control buffers being written are on the
            transaction's t_log_list queue, and metadata buffers are on
-           the t_iobuf_list queue.
+           the io_bufs list.
 
           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
@@ -818,47 +819,33 @@ start_journal_io:
 
        jbd_debug(3, "JBD2: commit phase 3\n");
 
-       /*
-        * akpm: these are BJ_IO, and j_list_lock is not needed.
-        * See __journal_try_to_free_buffer.
-        */
-wait_for_iobuf:
-       while (commit_transaction->t_iobuf_list != NULL) {
-               struct buffer_head *bh;
+       while (!list_empty(&io_bufs)) {
+               struct buffer_head *bh = list_entry(io_bufs.prev,
+                                                   struct buffer_head,
+                                                   b_assoc_buffers);
 
-               jh = commit_transaction->t_iobuf_list->b_tprev;
-               bh = jh2bh(jh);
-               if (buffer_locked(bh)) {
-                       wait_on_buffer(bh);
-                       goto wait_for_iobuf;
-               }
-               if (cond_resched())
-                       goto wait_for_iobuf;
+               wait_on_buffer(bh);
+               cond_resched();
 
                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;
-
-               clear_buffer_jwrite(bh);
-
-               JBUFFER_TRACE(jh, "ph4: unfile after journal write");
-               jbd2_journal_unfile_buffer(journal, jh);
+               jbd2_unfile_log_bh(bh);
 
                /*
-                * ->t_iobuf_list should contain only dummy buffer_heads
-                * which were created by jbd2_journal_write_metadata_buffer().
+                * The list contains temporary buffer heads created by
+                * jbd2_journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
-               jbd2_journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);
 
-               /* We also have to unlock and free the corresponding
-                   shadowed buffer */
+               /* We also have to refile the corresponding shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
-               clear_bit(BH_JWrite, &bh->b_state);
+               clear_buffer_jwrite(bh);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));
+               J_ASSERT_BH(bh, !buffer_shadow(bh));
 
                /* The metadata is now released for reuse, but we need
                    to remember it against this transaction so that when
@@ -866,14 +853,6 @@ wait_for_iobuf:
                    required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
-               /*
-                * Wake up any transactions which were waiting for this IO to
-                * complete. The barrier must be here so that changes by
-                * jbd2_journal_file_buffer() take effect before wake_up_bit()
-                * does the waitqueue check.
-                */
-               smp_mb();
-               wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }
@@ -883,26 +862,19 @@ wait_for_iobuf:
        jbd_debug(3, "JBD2: commit phase 4\n");
 
        /* Here we wait for the revoke record and descriptor record buffers */
- wait_for_ctlbuf:
-       while (commit_transaction->t_log_list != NULL) {
+       while (!list_empty(&log_bufs)) {
                struct buffer_head *bh;
 
-               jh = commit_transaction->t_log_list->b_tprev;
-               bh = jh2bh(jh);
-               if (buffer_locked(bh)) {
-                       wait_on_buffer(bh);
-                       goto wait_for_ctlbuf;
-               }
-               if (cond_resched())
-                       goto wait_for_ctlbuf;
+               bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
+               wait_on_buffer(bh);
+               cond_resched();
 
                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;
 
                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
-               jbd2_journal_unfile_buffer(journal, jh);
-               jbd2_journal_put_journal_head(jh);
+               jbd2_unfile_log_bh(bh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }
@@ -952,9 +924,7 @@ wait_for_iobuf:
        J_ASSERT(list_empty(&commit_transaction->t_inode_list));
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
-       J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
-       J_ASSERT(commit_transaction->t_log_list == NULL);
 
 restart_loop:
        /*
index 95457576e434b2624475bbdab2758974eefbd9d5..02c7ad9d7a412a6ce9a81c048d832d2238671306 100644 (file)
@@ -103,6 +103,24 @@ EXPORT_SYMBOL(jbd2_inode_cache);
 static void __journal_abort_soft (journal_t *journal, int errno);
 static int jbd2_journal_create_slab(size_t slab_size);
 
+#ifdef CONFIG_JBD2_DEBUG
+void __jbd2_debug(int level, const char *file, const char *func,
+                 unsigned int line, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       if (level > jbd2_journal_enable_debug)
+               return;
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
+       va_end(args);
+}
+EXPORT_SYMBOL(__jbd2_debug);
+#endif
+
 /* Checksumming functions */
 int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
 {
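
__jbd2_debug() gives the jbd_debug() statements scattered through this diff a single place to check jbd2_journal_enable_debug and to prefix the message with file, function and line. The wrapper itself lives in include/linux/jbd2.h and is not part of this hunk; it presumably amounts to a thin macro along these lines.

#ifdef CONFIG_JBD2_DEBUG
extern ushort jbd2_journal_enable_debug;

#define jbd_debug(n, fmt, a...) \
	__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
#define jbd_debug(n, fmt, a...)		/* compiled out */
#endif
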
@@ -310,14 +328,12 @@ static void journal_kill_thread(journal_t *journal)
  *
  * If the source buffer has already been modified by a new transaction
  * since we took the last commit snapshot, we use the frozen copy of
- * that data for IO.  If we end up using the existing buffer_head's data
- * for the write, then we *have* to lock the buffer to prevent anyone
- * else from using and possibly modifying it while the IO is in
- * progress.
+ * that data for IO. If we end up using the existing buffer_head's data
+ * for the write, then we have to make sure nobody modifies it while the
+ * IO is in progress. do_get_write_access() handles this.
  *
- * The function returns a pointer to the buffer_heads to be used for IO.
- *
- * We assume that the journal has already been locked in this function.
+ * The function returns a pointer to the buffer_head to be used for IO.
+ *
  *
  * Return value:
  *  <0: Error
@@ -330,15 +346,14 @@ static void journal_kill_thread(journal_t *journal)
 
 int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
                                  struct journal_head  *jh_in,
-                                 struct journal_head **jh_out,
-                                 unsigned long long blocknr)
+                                 struct buffer_head **bh_out,
+                                 sector_t blocknr)
 {
        int need_copy_out = 0;
        int done_copy_out = 0;
        int do_escape = 0;
        char *mapped_data;
        struct buffer_head *new_bh;
-       struct journal_head *new_jh;
        struct page *new_page;
        unsigned int new_offset;
        struct buffer_head *bh_in = jh2bh(jh_in);
@@ -368,14 +383,13 @@ retry_alloc:
 
        /* keep subsequent assertions sane */
        atomic_set(&new_bh->b_count, 1);
-       new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
 
+       jbd_lock_bh_state(bh_in);
+repeat:
        /*
         * If a new transaction has already done a buffer copy-out, then
         * we use that version of the data for the commit.
         */
-       jbd_lock_bh_state(bh_in);
-repeat:
        if (jh_in->b_frozen_data) {
                done_copy_out = 1;
                new_page = virt_to_page(jh_in->b_frozen_data);
@@ -415,7 +429,7 @@ repeat:
                jbd_unlock_bh_state(bh_in);
                tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
                if (!tmp) {
-                       jbd2_journal_put_journal_head(new_jh);
+                       brelse(new_bh);
                        return -ENOMEM;
                }
                jbd_lock_bh_state(bh_in);
@@ -426,7 +440,7 @@ repeat:
 
                jh_in->b_frozen_data = tmp;
                mapped_data = kmap_atomic(new_page);
-               memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
+               memcpy(tmp, mapped_data + new_offset, bh_in->b_size);
                kunmap_atomic(mapped_data);
 
                new_page = virt_to_page(tmp);
@@ -452,14 +466,14 @@ repeat:
        }
 
        set_bh_page(new_bh, new_page, new_offset);
-       new_jh->b_transaction = NULL;
-       new_bh->b_size = jh2bh(jh_in)->b_size;
-       new_bh->b_bdev = transaction->t_journal->j_dev;
+       new_bh->b_size = bh_in->b_size;
+       new_bh->b_bdev = journal->j_dev;
        new_bh->b_blocknr = blocknr;
+       new_bh->b_private = bh_in;
        set_buffer_mapped(new_bh);
        set_buffer_dirty(new_bh);
 
-       *jh_out = new_jh;
+       *bh_out = new_bh;
 
        /*
         * The to-be-written buffer needs to get moved to the io queue,
@@ -470,11 +484,9 @@ repeat:
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
        spin_unlock(&journal->j_list_lock);
+       set_buffer_shadow(bh_in);
        jbd_unlock_bh_state(bh_in);
 
-       JBUFFER_TRACE(new_jh, "file as BJ_IO");
-       jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
-
        return do_escape | (done_copy_out << 1);
 }
 
@@ -483,35 +495,6 @@ repeat:
  * journal, so that we can begin checkpointing when appropriate.
  */
 
-/*
- * __jbd2_log_space_left: Return the number of free blocks left in the journal.
- *
- * Called with the journal already locked.
- *
- * Called under j_state_lock
- */
-
-int __jbd2_log_space_left(journal_t *journal)
-{
-       int left = journal->j_free;
-
-       /* assert_spin_locked(&journal->j_state_lock); */
-
-       /*
-        * Be pessimistic here about the number of those free blocks which
-        * might be required for log descriptor control blocks.
-        */
-
-#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */
-
-       left -= MIN_LOG_RESERVED_BLOCKS;
-
-       if (left <= 0)
-               return 0;
-       left -= (left >> 3);
-       return left;
-}
-
 /*
  * Called with j_state_lock locked for writing.
  * Returns true if a transaction commit was started.
@@ -564,20 +547,17 @@ int jbd2_log_start_commit(journal_t *journal, tid_t tid)
 }
 
 /*
- * Force and wait upon a commit if the calling process is not within
- * transaction.  This is used for forcing out undo-protected data which contains
- * bitmaps, when the fs is running out of space.
- *
- * We can only force the running transaction if we don't have an active handle;
- * otherwise, we will deadlock.
- *
- * Returns true if a transaction was started.
+ * Force and wait any uncommitted transactions.  We can only force the running
+ * transaction if we don't have an active handle, otherwise, we will deadlock.
+ * Returns: <0 in case of error,
+ *           0 if nothing to commit,
+ *           1 if transaction was successfully committed.
  */
-int jbd2_journal_force_commit_nested(journal_t *journal)
+static int __jbd2_journal_force_commit(journal_t *journal)
 {
        transaction_t *transaction = NULL;
        tid_t tid;
-       int need_to_start = 0;
+       int need_to_start = 0, ret = 0;
 
        read_lock(&journal->j_state_lock);
        if (journal->j_running_transaction && !current->journal_info) {
@@ -588,16 +568,53 @@ int jbd2_journal_force_commit_nested(journal_t *journal)
                transaction = journal->j_committing_transaction;
 
        if (!transaction) {
+               /* Nothing to commit */
                read_unlock(&journal->j_state_lock);
-               return 0;       /* Nothing to retry */
+               return 0;
        }
-
        tid = transaction->t_tid;
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
                jbd2_log_start_commit(journal, tid);
-       jbd2_log_wait_commit(journal, tid);
-       return 1;
+       ret = jbd2_log_wait_commit(journal, tid);
+       if (!ret)
+               ret = 1;
+
+       return ret;
+}
+
+/**
+ * Force and wait upon a commit if the calling process is not within
+ * transaction.  This is used for forcing out undo-protected data which contains
+ * bitmaps, when the fs is running out of space.
+ *
+ * @journal: journal to force
+ * Returns true if progress was made.
+ */
+int jbd2_journal_force_commit_nested(journal_t *journal)
+{
+       int ret;
+
+       ret = __jbd2_journal_force_commit(journal);
+       return ret > 0;
+}
+
+/**
+ * int jbd2_journal_force_commit() - force any uncommitted transactions
+ * @journal: journal to force
+ *
+ * Caller wants an unconditional commit. We can only force the running transaction
+ * if we don't have an active handle, otherwise, we will deadlock.
+ */
+int jbd2_journal_force_commit(journal_t *journal)
+{
+       int ret;
+
+       J_ASSERT(!current->journal_info);
+       ret = __jbd2_journal_force_commit(journal);
+       if (ret > 0)
+               ret = 0;
+       return ret;
 }
 
 /*
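
With the refactor above, the nested variant keeps its historical "did we make progress" boolean while jbd2_journal_force_commit() reports errors from jbd2_log_wait_commit(). A caller retrying an allocation after forcing the journal out could, under those semantics, look like the sketch below; myfs_alloc_blocks and myfs_alloc_with_retry are purely illustrative.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/jbd2.h>

/* Hypothetical allocator that can fail with -ENOSPC. */
int myfs_alloc_blocks(struct inode *inode, unsigned int nblocks);

/* Retry an ENOSPC-prone operation after forcing the journal out. */
static int myfs_alloc_with_retry(journal_t *journal, struct inode *inode,
				 unsigned int nblocks)
{
	int retries = 3;
	int err;

	do {
		err = myfs_alloc_blocks(inode, nblocks);
		if (err != -ENOSPC)
			break;
		/* force_commit_nested() returns 1 only when committing a
		 * transaction actually made progress; give up otherwise. */
	} while (retries-- > 0 && jbd2_journal_force_commit_nested(journal));

	return err;
}
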
@@ -798,7 +815,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
  * But we don't bother doing that, so there will be coherency problems with
  * mmaps of blockdevs which hold live JBD-controlled filesystems.
  */
-struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
+struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
 {
        struct buffer_head *bh;
        unsigned long long blocknr;
@@ -817,7 +834,7 @@ struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        BUFFER_TRACE(bh, "return this buffer");
-       return jbd2_journal_add_journal_head(bh);
+       return bh;
 }
 
 /*
@@ -1062,11 +1079,10 @@ static journal_t * journal_init_common (void)
                return NULL;
 
        init_waitqueue_head(&journal->j_wait_transaction_locked);
-       init_waitqueue_head(&journal->j_wait_logspace);
        init_waitqueue_head(&journal->j_wait_done_commit);
-       init_waitqueue_head(&journal->j_wait_checkpoint);
        init_waitqueue_head(&journal->j_wait_commit);
        init_waitqueue_head(&journal->j_wait_updates);
+       init_waitqueue_head(&journal->j_wait_reserved);
        mutex_init(&journal->j_barrier);
        mutex_init(&journal->j_checkpoint_mutex);
        spin_lock_init(&journal->j_revoke_lock);
@@ -1076,6 +1092,7 @@ static journal_t * journal_init_common (void)
        journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
        journal->j_min_batch_time = 0;
        journal->j_max_batch_time = 15000; /* 15ms */
+       atomic_set(&journal->j_reserved_credits, 0);
 
        /* The journal is marked for error until we succeed with recovery! */
        journal->j_flags = JBD2_ABORT;
@@ -1318,6 +1335,7 @@ static int journal_reset(journal_t *journal)
 static void jbd2_write_superblock(journal_t *journal, int write_op)
 {
        struct buffer_head *bh = journal->j_sb_buffer;
+       journal_superblock_t *sb = journal->j_superblock;
        int ret;
 
        trace_jbd2_write_superblock(journal, write_op);
@@ -1339,6 +1357,7 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
                clear_buffer_write_io_error(bh);
                set_buffer_uptodate(bh);
        }
+       jbd2_superblock_csum_set(journal, sb);
        get_bh(bh);
        bh->b_end_io = end_buffer_write_sync;
        ret = submit_bh(write_op, bh);
@@ -1435,7 +1454,6 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
        jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
                  journal->j_errno);
        sb->s_errno    = cpu_to_be32(journal->j_errno);
-       jbd2_superblock_csum_set(journal, sb);
        read_unlock(&journal->j_state_lock);
 
        jbd2_write_superblock(journal, WRITE_SYNC);
@@ -2325,13 +2343,13 @@ static struct journal_head *journal_alloc_journal_head(void)
 #ifdef CONFIG_JBD2_DEBUG
        atomic_inc(&nr_journal_heads);
 #endif
-       ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
+       ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
        if (!ret) {
                jbd_debug(1, "out of memory for journal_head\n");
                pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
                while (!ret) {
                        yield();
-                       ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
+                       ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
                }
        }
        return ret;
@@ -2393,10 +2411,8 @@ struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
        struct journal_head *new_jh = NULL;
 
 repeat:
-       if (!buffer_jbd(bh)) {
+       if (!buffer_jbd(bh))
                new_jh = journal_alloc_journal_head();
-               memset(new_jh, 0, sizeof(*new_jh));
-       }
 
        jbd_lock_bh_journal_head(bh);
        if (buffer_jbd(bh)) {
index 626846bac32f8a5f2c01d4e3f39db9332642a25c..d4851464b57e5ab4aac7e5851f457c796c244795 100644 (file)
@@ -399,18 +399,17 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
 static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
                                      void *buf, __u32 sequence)
 {
-       __u32 provided, calculated;
+       __u32 csum32;
 
        if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
                return 1;
 
        sequence = cpu_to_be32(sequence);
-       calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
-                                sizeof(sequence));
-       calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize);
-       provided = be32_to_cpu(tag->t_checksum);
+       csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
+                            sizeof(sequence));
+       csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
 
-       return provided == cpu_to_be32(calculated);
+       return tag->t_checksum == cpu_to_be16(csum32);
 }
 
 static int do_one_pass(journal_t *journal,
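
Both sides of the tag checksum now agree that only the low 16 bits of the crc32c survive: the commit path stores cpu_to_be16(csum32) in t_checksum and the recovery check above compares against the same truncated form rather than a 32-bit value. A self-contained illustration of that truncation, with made-up numbers in the comment:

#include <linux/types.h>
#include <asm/byteorder.h>

/* How a full crc32c maps onto the 16-bit on-disk tag checksum. */
static inline __be16 tag_csum_from_crc32c(__u32 csum32)
{
	/* cpu_to_be16() keeps only the low 16 bits, e.g. 0xdeadbeef
	 * ends up stored as be16(0xbeef). */
	return cpu_to_be16(csum32);
}
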
index f30b80b4ce8bef98cab621bf731e13682661ca6d..198c9c10276dadf45983f3615f0804f27ee085e7 100644 (file)
@@ -122,9 +122,10 @@ struct jbd2_revoke_table_s
 
 #ifdef __KERNEL__
 static void write_one_revoke_record(journal_t *, transaction_t *,
-                                   struct journal_head **, int *,
+                                   struct list_head *,
+                                   struct buffer_head **, int *,
                                    struct jbd2_revoke_record_s *, int);
-static void flush_descriptor(journal_t *, struct journal_head *, int, int);
+static void flush_descriptor(journal_t *, struct buffer_head *, int, int);
 #endif
 
 /* Utility functions to maintain the revoke table */
@@ -531,9 +532,10 @@ void jbd2_journal_switch_revoke_table(journal_t *journal)
  */
 void jbd2_journal_write_revoke_records(journal_t *journal,
                                       transaction_t *transaction,
+                                      struct list_head *log_bufs,
                                       int write_op)
 {
-       struct journal_head *descriptor;
+       struct buffer_head *descriptor;
        struct jbd2_revoke_record_s *record;
        struct jbd2_revoke_table_s *revoke;
        struct list_head *hash_list;
@@ -553,7 +555,7 @@ void jbd2_journal_write_revoke_records(journal_t *journal,
                while (!list_empty(hash_list)) {
                        record = (struct jbd2_revoke_record_s *)
                                hash_list->next;
-                       write_one_revoke_record(journal, transaction,
+                       write_one_revoke_record(journal, transaction, log_bufs,
                                                &descriptor, &offset,
                                                record, write_op);
                        count++;
@@ -573,13 +575,14 @@ void jbd2_journal_write_revoke_records(journal_t *journal,
 
 static void write_one_revoke_record(journal_t *journal,
                                    transaction_t *transaction,
-                                   struct journal_head **descriptorp,
+                                   struct list_head *log_bufs,
+                                   struct buffer_head **descriptorp,
                                    int *offsetp,
                                    struct jbd2_revoke_record_s *record,
                                    int write_op)
 {
        int csum_size = 0;
-       struct journal_head *descriptor;
+       struct buffer_head *descriptor;
        int offset;
        journal_header_t *header;
 
@@ -609,26 +612,26 @@ static void write_one_revoke_record(journal_t *journal,
                descriptor = jbd2_journal_get_descriptor_buffer(journal);
                if (!descriptor)
                        return;
-               header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
+               header = (journal_header_t *)descriptor->b_data;
                header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
                header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
                header->h_sequence  = cpu_to_be32(transaction->t_tid);
 
                /* Record it so that we can wait for IO completion later */
-               JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
-               jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl);
+               BUFFER_TRACE(descriptor, "file in log_bufs");
+               jbd2_file_log_bh(log_bufs, descriptor);
 
                offset = sizeof(jbd2_journal_revoke_header_t);
                *descriptorp = descriptor;
        }
 
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
-               * ((__be64 *)(&jh2bh(descriptor)->b_data[offset])) =
+               * ((__be64 *)(&descriptor->b_data[offset])) =
                        cpu_to_be64(record->blocknr);
                offset += 8;
 
        } else {
-               * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
+               * ((__be32 *)(&descriptor->b_data[offset])) =
                        cpu_to_be32(record->blocknr);
                offset += 4;
        }
@@ -636,8 +639,7 @@ static void write_one_revoke_record(journal_t *journal,
        *offsetp = offset;
 }
 
-static void jbd2_revoke_csum_set(journal_t *j,
-                                struct journal_head *descriptor)
+static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
 {
        struct jbd2_journal_revoke_tail *tail;
        __u32 csum;
@@ -645,12 +647,10 @@ static void jbd2_revoke_csum_set(journal_t *j,
        if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
                return;
 
-       tail = (struct jbd2_journal_revoke_tail *)
-                       (jh2bh(descriptor)->b_data + j->j_blocksize -
+       tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
                        sizeof(struct jbd2_journal_revoke_tail));
        tail->r_checksum = 0;
-       csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
-                          j->j_blocksize);
+       csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
        tail->r_checksum = cpu_to_be32(csum);
 }
 
@@ -662,25 +662,24 @@ static void jbd2_revoke_csum_set(journal_t *j,
  */
 
 static void flush_descriptor(journal_t *journal,
-                            struct journal_head *descriptor,
+                            struct buffer_head *descriptor,
                             int offset, int write_op)
 {
        jbd2_journal_revoke_header_t *header;
-       struct buffer_head *bh = jh2bh(descriptor);
 
        if (is_journal_aborted(journal)) {
-               put_bh(bh);
+               put_bh(descriptor);
                return;
        }
 
-       header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
+       header = (jbd2_journal_revoke_header_t *)descriptor->b_data;
        header->r_count = cpu_to_be32(offset);
        jbd2_revoke_csum_set(journal, descriptor);
 
-       set_buffer_jwrite(bh);
-       BUFFER_TRACE(bh, "write");
-       set_buffer_dirty(bh);
-       write_dirty_buffer(bh, write_op);
+       set_buffer_jwrite(descriptor);
+       BUFFER_TRACE(descriptor, "write");
+       set_buffer_dirty(descriptor);
+       write_dirty_buffer(descriptor, write_op);
 }
 #endif
 
index 10f524c59ea88d48bf4f85f42e6fb2eca0d7a55b..7aa9a32573bba885d166e484c367bffc06bcd9e2 100644 (file)
@@ -89,7 +89,8 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
        transaction->t_expires = jiffies + journal->j_commit_interval;
        spin_lock_init(&transaction->t_handle_lock);
        atomic_set(&transaction->t_updates, 0);
-       atomic_set(&transaction->t_outstanding_credits, 0);
+       atomic_set(&transaction->t_outstanding_credits,
+                  atomic_read(&journal->j_reserved_credits));
        atomic_set(&transaction->t_handle_count, 0);
        INIT_LIST_HEAD(&transaction->t_inode_list);
        INIT_LIST_HEAD(&transaction->t_private_list);
@@ -140,6 +141,112 @@ static inline void update_t_max_wait(transaction_t *transaction,
 #endif
 }
 
+/*
+ * Wait until running transaction passes T_LOCKED state. Also starts the commit
+ * if needed. The function expects running transaction to exist and releases
+ * j_state_lock.
+ */
+static void wait_transaction_locked(journal_t *journal)
+       __releases(journal->j_state_lock)
+{
+       DEFINE_WAIT(wait);
+       int need_to_start;
+       tid_t tid = journal->j_running_transaction->t_tid;
+
+       prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+                       TASK_UNINTERRUPTIBLE);
+       need_to_start = !tid_geq(journal->j_commit_request, tid);
+       read_unlock(&journal->j_state_lock);
+       if (need_to_start)
+               jbd2_log_start_commit(journal, tid);
+       schedule();
+       finish_wait(&journal->j_wait_transaction_locked, &wait);
+}
+
+static void sub_reserved_credits(journal_t *journal, int blocks)
+{
+       atomic_sub(blocks, &journal->j_reserved_credits);
+       wake_up(&journal->j_wait_reserved);
+}
+
+/*
+ * Wait until we can add credits for handle to the running transaction.  Called
+ * with j_state_lock held for reading. Returns 0 if handle joined the running
+ * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
+ * caller must retry.
+ */
+static int add_transaction_credits(journal_t *journal, int blocks,
+                                  int rsv_blocks)
+{
+       transaction_t *t = journal->j_running_transaction;
+       int needed;
+       int total = blocks + rsv_blocks;
+
+       /*
+        * If the current transaction is locked down for commit, wait
+        * for the lock to be released.
+        */
+       if (t->t_state == T_LOCKED) {
+               wait_transaction_locked(journal);
+               return 1;
+       }
+
+       /*
+        * If there is not enough space left in the log to write all
+        * potential buffers requested by this operation, we need to
+        * stall pending a log checkpoint to free some more log space.
+        */
+       needed = atomic_add_return(total, &t->t_outstanding_credits);
+       if (needed > journal->j_max_transaction_buffers) {
+               /*
+                * If the current transaction is already too large,
+                * then start to commit it: we can then go back and
+                * attach this handle to a new transaction.
+                */
+               atomic_sub(total, &t->t_outstanding_credits);
+               wait_transaction_locked(journal);
+               return 1;
+       }
+
+       /*
+        * The commit code assumes that it can get enough log space
+        * without forcing a checkpoint.  This is *critical* for
+        * correctness: a checkpoint of a buffer which is also
+        * associated with a committing transaction creates a deadlock,
+        * so commit simply cannot force through checkpoints.
+        *
+        * We must therefore ensure the necessary space in the journal
+        * *before* starting to dirty potentially checkpointed buffers
+        * in the new transaction.
+        */
+       if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
+               atomic_sub(total, &t->t_outstanding_credits);
+               read_unlock(&journal->j_state_lock);
+               write_lock(&journal->j_state_lock);
+               if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
+                       __jbd2_log_wait_for_space(journal);
+               write_unlock(&journal->j_state_lock);
+               return 1;
+       }
+
+       /* No reservation? We are done... */
+       if (!rsv_blocks)
+               return 0;
+
+       needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
+       /* We allow at most half of a transaction to be reserved */
+       if (needed > journal->j_max_transaction_buffers / 2) {
+               sub_reserved_credits(journal, rsv_blocks);
+               atomic_sub(total, &t->t_outstanding_credits);
+               read_unlock(&journal->j_state_lock);
+               wait_event(journal->j_wait_reserved,
+                        atomic_read(&journal->j_reserved_credits) + rsv_blocks
+                        <= journal->j_max_transaction_buffers / 2);
+               return 1;
+       }
+       return 0;
+}
+
 /*
  * start_this_handle: Given a handle, deal with any locking or stalling
  * needed to make sure that there is enough journal space for the handle
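
add_transaction_credits() above admits a handle only if the running transaction still fits within j_max_transaction_buffers once both its normal and reserved blocks are counted, and it caps the total reserved credits at half of that limit; the log-space comparison is a third, separate gate, and on any failure the real code also backs the credits out and sleeps. A toy model of the first two checks, using plain ints instead of atomics, with illustrative constants not taken from this merge:

/* Returns 1 if a handle asking for @blocks + @rsv_blocks credits would be
 * admitted, 0 if it would have to wait. */
static int would_admit(int outstanding, int reserved,
		       int blocks, int rsv_blocks, int max_buffers)
{
	int total = blocks + rsv_blocks;

	if (outstanding + total > max_buffers)
		return 0;	/* transaction too large: wait for a commit */
	if (rsv_blocks && reserved + rsv_blocks > max_buffers / 2)
		return 0;	/* at most half of a transaction may be reserved */
	return 1;
}

/* E.g. with max_buffers = 1024:
 *   would_admit(900, 0, 40, 24, 1024) == 1   (964 <= 1024, 24 <= 512)
 *   would_admit(980, 0, 40, 24, 1024) == 0   (1044 > 1024)
 */
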
@@ -151,18 +258,24 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
                             gfp_t gfp_mask)
 {
        transaction_t   *transaction, *new_transaction = NULL;
-       tid_t           tid;
-       int             needed, need_to_start;
-       int             nblocks = handle->h_buffer_credits;
+       int             blocks = handle->h_buffer_credits;
+       int             rsv_blocks = 0;
        unsigned long ts = jiffies;
 
-       if (nblocks > journal->j_max_transaction_buffers) {
+       /*
+        * 1/2 of transaction can be reserved so we can practically handle
+        * only 1/2 of maximum transaction size per operation
+        */
+       if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) {
                printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
-                      current->comm, nblocks,
-                      journal->j_max_transaction_buffers);
+                      current->comm, blocks,
+                      journal->j_max_transaction_buffers / 2);
                return -ENOSPC;
        }
 
+       if (handle->h_rsv_handle)
+               rsv_blocks = handle->h_rsv_handle->h_buffer_credits;
+
 alloc_transaction:
        if (!journal->j_running_transaction) {
                new_transaction = kmem_cache_zalloc(transaction_cache,
@@ -199,8 +312,12 @@ repeat:
                return -EROFS;
        }
 
-       /* Wait on the journal's transaction barrier if necessary */
-       if (journal->j_barrier_count) {
+       /*
+        * Wait on the journal's transaction barrier if necessary. Specifically,
+        * we allow reserved handles to proceed because otherwise commit could
+        * deadlock when page writeback is unable to complete.
+        */
+       if (!handle->h_reserved && journal->j_barrier_count) {
                read_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                                journal->j_barrier_count == 0);
@@ -213,7 +330,7 @@ repeat:
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
                if (!journal->j_running_transaction &&
-                   !journal->j_barrier_count) {
+                   (handle->h_reserved || !journal->j_barrier_count)) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
@@ -223,85 +340,18 @@ repeat:
 
        transaction = journal->j_running_transaction;
 
-       /*
-        * If the current transaction is locked down for commit, wait for the
-        * lock to be released.
-        */
-       if (transaction->t_state == T_LOCKED) {
-               DEFINE_WAIT(wait);
-
-               prepare_to_wait(&journal->j_wait_transaction_locked,
-                                       &wait, TASK_UNINTERRUPTIBLE);
-               read_unlock(&journal->j_state_lock);
-               schedule();
-               finish_wait(&journal->j_wait_transaction_locked, &wait);
-               goto repeat;
-       }
-
-       /*
-        * If there is not enough space left in the log to write all potential
-        * buffers requested by this operation, we need to stall pending a log
-        * checkpoint to free some more log space.
-        */
-       needed = atomic_add_return(nblocks,
-                                  &transaction->t_outstanding_credits);
-
-       if (needed > journal->j_max_transaction_buffers) {
+       if (!handle->h_reserved) {
+               /* We may have dropped j_state_lock - restart in that case */
+               if (add_transaction_credits(journal, blocks, rsv_blocks))
+                       goto repeat;
+       } else {
                /*
-                * If the current transaction is already too large, then start
-                * to commit it: we can then go back and attach this handle to
-                * a new transaction.
+                * We have a reserved handle, so we are allowed to join the
+                * T_LOCKED transaction and we don't have to check transaction
+                * size or journal space.
                 */
-               DEFINE_WAIT(wait);
-
-               jbd_debug(2, "Handle %p starting new commit...\n", handle);
-               atomic_sub(nblocks, &transaction->t_outstanding_credits);
-               prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
-                               TASK_UNINTERRUPTIBLE);
-               tid = transaction->t_tid;
-               need_to_start = !tid_geq(journal->j_commit_request, tid);
-               read_unlock(&journal->j_state_lock);
-               if (need_to_start)
-                       jbd2_log_start_commit(journal, tid);
-               schedule();
-               finish_wait(&journal->j_wait_transaction_locked, &wait);
-               goto repeat;
-       }
-
-       /*
-        * The commit code assumes that it can get enough log space
-        * without forcing a checkpoint.  This is *critical* for
-        * correctness: a checkpoint of a buffer which is also
-        * associated with a committing transaction creates a deadlock,
-        * so commit simply cannot force through checkpoints.
-        *
-        * We must therefore ensure the necessary space in the journal
-        * *before* starting to dirty potentially checkpointed buffers
-        * in the new transaction.
-        *
-        * The worst part is, any transaction currently committing can
-        * reduce the free space arbitrarily.  Be careful to account for
-        * those buffers when checkpointing.
-        */
-
-       /*
-        * @@@ AKPM: This seems rather over-defensive.  We're giving commit
-        * a _lot_ of headroom: 1/4 of the journal plus the size of
-        * the committing transaction.  Really, we only need to give it
-        * committing_transaction->t_outstanding_credits plus "enough" for
-        * the log control blocks.
-        * Also, this test is inconsistent with the matching one in
-        * jbd2_journal_extend().
-        */
-       if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
-               jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
-               atomic_sub(nblocks, &transaction->t_outstanding_credits);
-               read_unlock(&journal->j_state_lock);
-               write_lock(&journal->j_state_lock);
-               if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
-                       __jbd2_log_wait_for_space(journal);
-               write_unlock(&journal->j_state_lock);
-               goto repeat;
+               sub_reserved_credits(journal, blocks);
+               handle->h_reserved = 0;
        }
 
        /* OK, account for the buffers that this operation expects to
@@ -309,15 +359,16 @@ repeat:
         */
        update_t_max_wait(transaction, ts);
        handle->h_transaction = transaction;
-       handle->h_requested_credits = nblocks;
+       handle->h_requested_credits = blocks;
        handle->h_start_jiffies = jiffies;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
-       jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
-                 handle, nblocks,
+       jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
+                 handle, blocks,
                  atomic_read(&transaction->t_outstanding_credits),
-                 __jbd2_log_space_left(journal));
+                 jbd2_log_space_left(journal));
        read_unlock(&journal->j_state_lock);
+       current->journal_info = handle;
 
        lock_map_acquire(&handle->h_lockdep_map);
        jbd2_journal_free_transaction(new_transaction);
@@ -348,16 +399,21 @@ static handle_t *new_handle(int nblocks)
  *
  * We make sure that the transaction can guarantee at least nblocks of
  * modified buffers in the log.  We block until the log can guarantee
- * that much space.
- *
- * This function is visible to journal users (like ext3fs), so is not
- * called with the journal already locked.
+ * that much space. Additionally, if rsv_blocks > 0, we also create another
+ * handle with rsv_blocks reserved blocks in the journal. This handle is
+ * stored in h_rsv_handle. It is not attached to any particular transaction
+ * and thus doesn't block transaction commit. If the caller uses this reserved
+ * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
+ * on the parent handle will dispose of the reserved one. A reserved handle has
+ * to be converted to a normal handle using jbd2_journal_start_reserved() before
+ * it can be used.
  *
  * Return a pointer to a newly allocated handle, or an ERR_PTR() value
  * on failure.
  */
-handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask,
-                             unsigned int type, unsigned int line_no)
+handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
+                             gfp_t gfp_mask, unsigned int type,
+                             unsigned int line_no)
 {
        handle_t *handle = journal_current_handle();
        int err;
@@ -374,13 +430,24 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask,
        handle = new_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);
+       if (rsv_blocks) {
+               handle_t *rsv_handle;
 
-       current->journal_info = handle;
+               rsv_handle = new_handle(rsv_blocks);
+               if (!rsv_handle) {
+                       jbd2_free_handle(handle);
+                       return ERR_PTR(-ENOMEM);
+               }
+               rsv_handle->h_reserved = 1;
+               rsv_handle->h_journal = journal;
+               handle->h_rsv_handle = rsv_handle;
+       }
 
        err = start_this_handle(journal, handle, gfp_mask);
        if (err < 0) {
+               if (handle->h_rsv_handle)
+                       jbd2_free_handle(handle->h_rsv_handle);
                jbd2_free_handle(handle);
-               current->journal_info = NULL;
                return ERR_PTR(err);
        }
        handle->h_type = type;
@@ -395,10 +462,65 @@ EXPORT_SYMBOL(jbd2__journal_start);
 
 handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 {
-       return jbd2__journal_start(journal, nblocks, GFP_NOFS, 0, 0);
+       return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);
 }
 EXPORT_SYMBOL(jbd2_journal_start);
 
+void jbd2_journal_free_reserved(handle_t *handle)
+{
+       journal_t *journal = handle->h_journal;
+
+       WARN_ON(!handle->h_reserved);
+       sub_reserved_credits(journal, handle->h_buffer_credits);
+       jbd2_free_handle(handle);
+}
+EXPORT_SYMBOL(jbd2_journal_free_reserved);
+
+/**
+ * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle
+ * @handle: handle to start
+ *
+ * Start handle that has been previously reserved with jbd2_journal_reserve().
+ * This attaches @handle to the running transaction (or creates one if there's
+ * no transaction running). Unlike jbd2_journal_start(), this function cannot
+ * block on journal commit, checkpointing, or similar operations. It can block
+ * on memory allocation or a frozen journal, though.
+ *
+ * Return 0 on success, non-zero on error; the handle is freed in that case.
+ */
+int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
+                               unsigned int line_no)
+{
+       journal_t *journal = handle->h_journal;
+       int ret = -EIO;
+
+       if (WARN_ON(!handle->h_reserved)) {
+               /* Someone passed in a normal handle? Just stop it. */
+               jbd2_journal_stop(handle);
+               return ret;
+       }
+       /*
+        * The usefulness of mixing reserved and unreserved handles is
+        * questionable. So far nobody seems to need it, so just error out.
+        */
+       if (WARN_ON(current->journal_info)) {
+               jbd2_journal_free_reserved(handle);
+               return ret;
+       }
+
+       handle->h_journal = NULL;
+       /*
+        * GFP_NOFS is used here because callers are likely calling from
+        * writeback or similarly constrained call sites.
+        */
+       ret = start_this_handle(journal, handle, GFP_NOFS);
+       if (ret < 0)
+               jbd2_journal_free_reserved(handle);
+       handle->h_type = type;
+       handle->h_line_no = line_no;
+       return ret;
+}
+EXPORT_SYMBOL(jbd2_journal_start_reserved);
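To make the new API concrete, here is a minimal, hypothetical caller-side sketch (not part of the patch; the credit counts and the type/line_no values are arbitrary, and error handling is trimmed). It follows the rules documented above: detach h_rsv_handle before stopping the parent handle, and convert the reservation with jbd2_journal_start_reserved() before using it.

        handle_t *handle, *rsv;
        int ret;

        handle = jbd2__journal_start(journal, 16 /* blocks */,
                                     8 /* rsv_blocks */, GFP_NOFS, 0, 0);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        /* ... journal up to 16 blocks of metadata under 'handle' ... */

        rsv = handle->h_rsv_handle;
        handle->h_rsv_handle = NULL;   /* keep jbd2_journal_stop() from freeing it */
        jbd2_journal_stop(handle);

        /* later, e.g. from writeback, convert the reservation into a handle */
        ret = jbd2_journal_start_reserved(rsv, 0, 0);
        if (ret)
                return ret;            /* rsv has already been freed on error */
        /* ... journal up to 8 blocks under 'rsv' ... */
        jbd2_journal_stop(rsv);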
 
 /**
  * int jbd2_journal_extend() - extend buffer credits.
@@ -423,49 +545,53 @@ EXPORT_SYMBOL(jbd2_journal_start);
 int jbd2_journal_extend(handle_t *handle, int nblocks)
 {
        transaction_t *transaction = handle->h_transaction;
-       journal_t *journal = transaction->t_journal;
+       journal_t *journal;
        int result;
        int wanted;
 
-       result = -EIO;
+       WARN_ON(!transaction);
        if (is_handle_aborted(handle))
-               goto out;
+               return -EROFS;
+       journal = transaction->t_journal;
 
        result = 1;
 
        read_lock(&journal->j_state_lock);
 
        /* Don't extend a locked-down transaction! */
-       if (handle->h_transaction->t_state != T_RUNNING) {
+       if (transaction->t_state != T_RUNNING) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction not running\n", handle, nblocks);
                goto error_out;
        }
 
        spin_lock(&transaction->t_handle_lock);
-       wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;
+       wanted = atomic_add_return(nblocks,
+                                  &transaction->t_outstanding_credits);
 
        if (wanted > journal->j_max_transaction_buffers) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction too large\n", handle, nblocks);
+               atomic_sub(nblocks, &transaction->t_outstanding_credits);
                goto unlock;
        }
 
-       if (wanted > __jbd2_log_space_left(journal)) {
+       if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
+           jbd2_log_space_left(journal)) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "insufficient log space\n", handle, nblocks);
+               atomic_sub(nblocks, &transaction->t_outstanding_credits);
                goto unlock;
        }
 
        trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
-                                handle->h_transaction->t_tid,
+                                transaction->t_tid,
                                 handle->h_type, handle->h_line_no,
                                 handle->h_buffer_credits,
                                 nblocks);
 
        handle->h_buffer_credits += nblocks;
        handle->h_requested_credits += nblocks;
-       atomic_add(nblocks, &transaction->t_outstanding_credits);
        result = 0;
 
        jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
@@ -473,7 +599,6 @@ unlock:
        spin_unlock(&transaction->t_handle_lock);
 error_out:
        read_unlock(&journal->j_state_lock);
-out:
        return result;
 }
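A worked example of the extension check above, with hypothetical numbers (the value of JBD2_CONTROL_BLOCKS_SHIFT is defined outside this hunk and is assumed here): after the extension the transaction must fit in the log together with a budget for journal control blocks.

        /* assumed: JBD2_CONTROL_BLOCKS_SHIFT == 5                        */
        /* wanted = 160  ->  160 + (160 >> 5) = 160 + 5 = 165             */
        /* the extension is denied unless jbd2_log_space_left() >= 165    */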
 
@@ -490,19 +615,22 @@ out:
  * to a running handle, a call to jbd2_journal_restart will commit the
  * handle's transaction so far and reattach the handle to a new
  * transaction capable of guaranteeing the requested number of
- * credits.
+ * credits. We preserve any reserved handle that is attached to the
+ * passed-in handle.
  */
 int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
 {
        transaction_t *transaction = handle->h_transaction;
-       journal_t *journal = transaction->t_journal;
+       journal_t *journal;
        tid_t           tid;
        int             need_to_start, ret;
 
+       WARN_ON(!transaction);
        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
                return 0;
+       journal = transaction->t_journal;
 
        /*
         * First unlink the handle from its current transaction, and start the
@@ -515,12 +643,18 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
        spin_lock(&transaction->t_handle_lock);
        atomic_sub(handle->h_buffer_credits,
                   &transaction->t_outstanding_credits);
+       if (handle->h_rsv_handle) {
+               sub_reserved_credits(journal,
+                                    handle->h_rsv_handle->h_buffer_credits);
+       }
        if (atomic_dec_and_test(&transaction->t_updates))
                wake_up(&journal->j_wait_updates);
+       tid = transaction->t_tid;
        spin_unlock(&transaction->t_handle_lock);
+       handle->h_transaction = NULL;
+       current->journal_info = NULL;
 
        jbd_debug(2, "restarting handle %p\n", handle);
-       tid = transaction->t_tid;
        need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
@@ -557,6 +691,14 @@ void jbd2_journal_lock_updates(journal_t *journal)
        write_lock(&journal->j_state_lock);
        ++journal->j_barrier_count;
 
+       /* Wait until there are no reserved handles */
+       if (atomic_read(&journal->j_reserved_credits)) {
+               write_unlock(&journal->j_state_lock);
+               wait_event(journal->j_wait_reserved,
+                          atomic_read(&journal->j_reserved_credits) == 0);
+               write_lock(&journal->j_state_lock);
+       }
+
        /* Wait until there are no running updates */
        while (1) {
                transaction_t *transaction = journal->j_running_transaction;
@@ -619,6 +761,12 @@ static void warn_dirty_buffer(struct buffer_head *bh)
               bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
 }
 
+static int sleep_on_shadow_bh(void *word)
+{
+       io_schedule();
+       return 0;
+}
+
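For context on the helper above (an aside, not taken from this series): in this kernel, wait_on_bit() calls the supplied action while the bit is still set, and the action's return value decides whether to keep waiting (zero) or abort the wait (non-zero). Since the wait used below is TASK_UNINTERRUPTIBLE, sleeping unconditionally in io_schedule() is enough; a hypothetical interruptible waiter would look roughly like:

static int sleep_on_bit_interruptible(void *word)
{
        if (signal_pending(current))
                return -EINTR;  /* non-zero aborts the wait */
        schedule();
        return 0;               /* zero means: keep waiting  */
}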
 /*
  * If the buffer is already part of the current transaction, then there
  * is nothing we need to do.  If it is already part of a prior
@@ -634,17 +782,16 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
                        int force_copy)
 {
        struct buffer_head *bh;
-       transaction_t *transaction;
+       transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        int error;
        char *frozen_buffer = NULL;
        int need_copy = 0;
        unsigned long start_lock, time_lock;
 
+       WARN_ON(!transaction);
        if (is_handle_aborted(handle))
                return -EROFS;
-
-       transaction = handle->h_transaction;
        journal = transaction->t_journal;
 
        jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
@@ -754,41 +901,29 @@ repeat:
                 * journaled.  If the primary copy is already going to
                 * disk then we cannot do copy-out here. */
 
-               if (jh->b_jlist == BJ_Shadow) {
-                       DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
-                       wait_queue_head_t *wqh;
-
-                       wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
-
+               if (buffer_shadow(bh)) {
                        JBUFFER_TRACE(jh, "on shadow: sleep");
                        jbd_unlock_bh_state(bh);
-                       /* commit wakes up all shadow buffers after IO */
-                       for ( ; ; ) {
-                               prepare_to_wait(wqh, &wait.wait,
-                                               TASK_UNINTERRUPTIBLE);
-                               if (jh->b_jlist != BJ_Shadow)
-                                       break;
-                               schedule();
-                       }
-                       finish_wait(wqh, &wait.wait);
+                       wait_on_bit(&bh->b_state, BH_Shadow,
+                                   sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE);
                        goto repeat;
                }
 
-               /* Only do the copy if the currently-owning transaction
-                * still needs it.  If it is on the Forget list, the
-                * committing transaction is past that stage.  The
-                * buffer had better remain locked during the kmalloc,
-                * but that should be true --- we hold the journal lock
-                * still and the buffer is already on the BUF_JOURNAL
-                * list so won't be flushed.
+               /*
+                * Only do the copy if the currently-owning transaction still
+                * needs it. If the buffer isn't on the BJ_Metadata list, the
+                * committing transaction is past that stage (here we use the
+                * fact that BH_Shadow is set under the bh_state lock together
+                * with refiling to the BJ_Shadow list, and at this point we
+                * know the buffer doesn't have BH_Shadow set).
                 *
                 * Subtle point, though: if this is a get_undo_access,
                 * then we will be relying on the frozen_data to contain
                 * the new value of the committed_data record after the
                 * transaction, so we HAVE to force the frozen_data copy
-                * in that case. */
-
-               if (jh->b_jlist != BJ_Forget || force_copy) {
+                * in that case.
+                */
+               if (jh->b_jlist == BJ_Metadata || force_copy) {
                        JBUFFER_TRACE(jh, "generate frozen data");
                        if (!frozen_buffer) {
                                JBUFFER_TRACE(jh, "allocate memory for buffer");
@@ -915,14 +1050,16 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
 int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
 {
        transaction_t *transaction = handle->h_transaction;
-       journal_t *journal = transaction->t_journal;
+       journal_t *journal;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;
 
        jbd_debug(5, "journal_head %p\n", jh);
+       WARN_ON(!transaction);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
+       journal = transaction->t_journal;
        err = 0;
 
        JBUFFER_TRACE(jh, "entry");
@@ -1128,12 +1265,14 @@ void jbd2_buffer_abort_trigger(struct journal_head *jh,
 int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 {
        transaction_t *transaction = handle->h_transaction;
-       journal_t *journal = transaction->t_journal;
+       journal_t *journal;
        struct journal_head *jh;
        int ret = 0;
 
+       WARN_ON(!transaction);
        if (is_handle_aborted(handle))
-               goto out;
+               return -EROFS;
+       journal = transaction->t_journal;
        jh = jbd2_journal_grab_journal_head(bh);
        if (!jh) {
                ret = -EUCLEAN;
@@ -1227,7 +1366,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 
        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
-       __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
+       __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
 out_unlock_bh:
        jbd_unlock_bh_state(bh);
@@ -1258,12 +1397,17 @@ out:
 int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
 {
        transaction_t *transaction = handle->h_transaction;
-       journal_t *journal = transaction->t_journal;
+       journal_t *journal;
        struct journal_head *jh;
        int drop_reserve = 0;
        int err = 0;
        int was_modified = 0;
 
+       WARN_ON(!transaction);
+       if (is_handle_aborted(handle))
+               return -EROFS;
+       journal = transaction->t_journal;
+
        BUFFER_TRACE(bh, "entry");
 
        jbd_lock_bh_state(bh);
@@ -1290,7 +1434,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
         */
        jh->b_modified = 0;
 
-       if (jh->b_transaction == handle->h_transaction) {
+       if (jh->b_transaction == transaction) {
                J_ASSERT_JH(jh, !jh->b_frozen_data);
 
                /* If we are forgetting a buffer which is already part
@@ -1385,19 +1529,21 @@ drop:
 int jbd2_journal_stop(handle_t *handle)
 {
        transaction_t *transaction = handle->h_transaction;
-       journal_t *journal = transaction->t_journal;
-       int err, wait_for_commit = 0;
+       journal_t *journal;
+       int err = 0, wait_for_commit = 0;
        tid_t tid;
        pid_t pid;
 
+       if (!transaction)
+               goto free_and_exit;
+       journal = transaction->t_journal;
+
        J_ASSERT(journal_current_handle() == handle);
 
        if (is_handle_aborted(handle))
                err = -EIO;
-       else {
+       else
                J_ASSERT(atomic_read(&transaction->t_updates) > 0);
-               err = 0;
-       }
 
        if (--handle->h_ref > 0) {
                jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
@@ -1407,7 +1553,7 @@ int jbd2_journal_stop(handle_t *handle)
 
        jbd_debug(4, "Handle %p going down\n", handle);
        trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
-                               handle->h_transaction->t_tid,
+                               transaction->t_tid,
                                handle->h_type, handle->h_line_no,
                                jiffies - handle->h_start_jiffies,
                                handle->h_sync, handle->h_requested_credits,
@@ -1518,33 +1664,13 @@ int jbd2_journal_stop(handle_t *handle)
 
        lock_map_release(&handle->h_lockdep_map);
 
+       if (handle->h_rsv_handle)
+               jbd2_journal_free_reserved(handle->h_rsv_handle);
+free_and_exit:
        jbd2_free_handle(handle);
        return err;
 }
 
-/**
- * int jbd2_journal_force_commit() - force any uncommitted transactions
- * @journal: journal to force
- *
- * For synchronous operations: force any uncommitted transactions
- * to disk.  May seem kludgy, but it reuses all the handle batching
- * code in a very simple manner.
- */
-int jbd2_journal_force_commit(journal_t *journal)
-{
-       handle_t *handle;
-       int ret;
-
-       handle = jbd2_journal_start(journal, 1);
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-       } else {
-               handle->h_sync = 1;
-               ret = jbd2_journal_stop(handle);
-       }
-       return ret;
-}
-
 /*
  *
  * List management code snippets: various functions for manipulating the
@@ -1601,10 +1727,10 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
  * Remove a buffer from the appropriate transaction list.
  *
  * Note that this function can *change* the value of
- * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
- * t_log_list or t_reserved_list.  If the caller is holding onto a copy of one
- * of these pointers, it could go bad.  Generally the caller needs to re-read
- * the pointer from the transaction_t.
+ * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
+ * t_reserved_list.  If the caller is holding onto a copy of one of these
+ * pointers, it could go bad.  Generally the caller needs to re-read the
+ * pointer from the transaction_t.
  *
  * Called under j_list_lock.
  */
@@ -1634,15 +1760,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
        case BJ_Forget:
                list = &transaction->t_forget;
                break;
-       case BJ_IO:
-               list = &transaction->t_iobuf_list;
-               break;
        case BJ_Shadow:
                list = &transaction->t_shadow_list;
                break;
-       case BJ_LogCtl:
-               list = &transaction->t_log_list;
-               break;
        case BJ_Reserved:
                list = &transaction->t_reserved_list;
                break;
@@ -2034,18 +2154,23 @@ zap_buffer_unlocked:
  * void jbd2_journal_invalidatepage()
  * @journal: journal to use for flush...
  * @page:    page to flush
- * @offset:  length of page to invalidate.
+ * @offset:  start of the range to invalidate
+ * @length:  length of the range to invalidate
  *
- * Reap page buffers containing data after offset in page. Can return -EBUSY
- * if buffers are part of the committing transaction and the page is straddling
- * i_size. Caller then has to wait for current commit and try again.
+ * Reap page buffers containing data in the specified range of the page.
+ * Can return -EBUSY if buffers are part of the committing transaction and
+ * the page straddles i_size. The caller then has to wait for the current
+ * commit and try again.
  */
 int jbd2_journal_invalidatepage(journal_t *journal,
                                struct page *page,
-                               unsigned long offset)
+                               unsigned int offset,
+                               unsigned int length)
 {
        struct buffer_head *head, *bh, *next;
+       unsigned int stop = offset + length;
        unsigned int curr_off = 0;
+       int partial_page = (offset || length < PAGE_CACHE_SIZE);
        int may_free = 1;
        int ret = 0;
 
@@ -2054,6 +2179,8 @@ int jbd2_journal_invalidatepage(journal_t *journal,
        if (!page_has_buffers(page))
                return 0;
 
+       BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
         * cautious in our locking. */
@@ -2063,10 +2190,13 @@ int jbd2_journal_invalidatepage(journal_t *journal,
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;
 
+               if (next_off > stop)
+                       return 0;
+
                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
-                       ret = journal_unmap_buffer(journal, bh, offset > 0);
+                       ret = journal_unmap_buffer(journal, bh, partial_page);
                        unlock_buffer(bh);
                        if (ret < 0)
                                return ret;
@@ -2077,7 +2207,7 @@ int jbd2_journal_invalidatepage(journal_t *journal,
 
        } while (bh != head);
 
-       if (!offset) {
+       if (!partial_page) {
                if (may_free && try_to_free_buffers(page))
                        J_ASSERT(!page_has_buffers(page));
        }
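The same interface change recurs in the logfs and jfs hunks further down: ->invalidatepage now receives the byte range being invalidated rather than only its start. A minimal sketch of the new shape (hypothetical filesystem, name invented for illustration; only the prototype and the partial-page test seen in this series are assumed):

static void example_invalidatepage(struct page *page,
                                   unsigned int offset,
                                   unsigned int length)
{
        /* [offset, offset + length) is the range being invalidated */
        int partial_page = (offset || length < PAGE_CACHE_SIZE);

        if (!partial_page) {
                /* the whole page goes away: all private state can be dropped */
        }
        /* otherwise only buffers inside the range may be invalidated */
}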
@@ -2138,15 +2268,9 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
        case BJ_Forget:
                list = &transaction->t_forget;
                break;
-       case BJ_IO:
-               list = &transaction->t_iobuf_list;
-               break;
        case BJ_Shadow:
                list = &transaction->t_shadow_list;
                break;
-       case BJ_LogCtl:
-               list = &transaction->t_log_list;
-               break;
        case BJ_Reserved:
                list = &transaction->t_reserved_list;
                break;
@@ -2248,10 +2372,12 @@ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
 int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
 {
        transaction_t *transaction = handle->h_transaction;
-       journal_t *journal = transaction->t_journal;
+       journal_t *journal;
 
+       WARN_ON(!transaction);
        if (is_handle_aborted(handle))
-               return -EIO;
+               return -EROFS;
+       journal = transaction->t_journal;
 
        jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
                        transaction->t_tid);
index acd46a4160cb939262565fc3cf43d600c3cb8e20..e3aac222472e1f06e9d998f46323b4d8be0e1071 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/time.h>
 #include "nodelist.h"
 
-static int jffs2_readdir (struct file *, void *, filldir_t);
+static int jffs2_readdir (struct file *, struct dir_context *);
 
 static int jffs2_create (struct inode *,struct dentry *,umode_t,
                         bool);
@@ -40,7 +40,7 @@ static int jffs2_rename (struct inode *, struct dentry *,
 const struct file_operations jffs2_dir_operations =
 {
        .read =         generic_read_dir,
-       .readdir =      jffs2_readdir,
+       .iterate =      jffs2_readdir,
        .unlocked_ioctl=jffs2_ioctl,
        .fsync =        jffs2_fsync,
        .llseek =       generic_file_llseek,
@@ -114,60 +114,40 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
 /***********************************************************************/
 
 
-static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int jffs2_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct jffs2_inode_info *f;
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
+       struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
        struct jffs2_full_dirent *fd;
-       unsigned long offset, curofs;
+       unsigned long curofs = 1;
 
-       jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n",
-                 file_inode(filp)->i_ino);
+       jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", inode->i_ino);
 
-       f = JFFS2_INODE_INFO(inode);
-
-       offset = filp->f_pos;
-
-       if (offset == 0) {
-               jffs2_dbg(1, "Dirent 0: \".\", ino #%lu\n", inode->i_ino);
-               if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
-                       goto out;
-               offset++;
-       }
-       if (offset == 1) {
-               unsigned long pino = parent_ino(filp->f_path.dentry);
-               jffs2_dbg(1, "Dirent 1: \"..\", ino #%lu\n", pino);
-               if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0)
-                       goto out;
-               offset++;
-       }
+       if (!dir_emit_dots(file, ctx))
+               return 0;
 
-       curofs=1;
        mutex_lock(&f->sem);
        for (fd = f->dents; fd; fd = fd->next) {
-
                curofs++;
-               /* First loop: curofs = 2; offset = 2 */
-               if (curofs < offset) {
+               /* First loop: curofs = 2; pos = 2 */
+               if (curofs < ctx->pos) {
                        jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
-                                 fd->name, fd->ino, fd->type, curofs, offset);
+                                 fd->name, fd->ino, fd->type, curofs, (unsigned long)ctx->pos);
                        continue;
                }
                if (!fd->ino) {
                        jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n",
                                  fd->name);
-                       offset++;
+                       ctx->pos++;
                        continue;
                }
                jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n",
-                         offset, fd->name, fd->ino, fd->type);
-               if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0)
+                         (unsigned long)ctx->pos, fd->name, fd->ino, fd->type);
+               if (!dir_emit(ctx, fd->name, strlen(fd->name), fd->ino, fd->type))
                        break;
-               offset++;
+               ctx->pos++;
        }
        mutex_unlock(&f->sem);
- out:
-       filp->f_pos = offset;
        return 0;
 }
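The jffs2 change above is one instance of the tree-wide readdir to iterate conversion that also touches jfs, libfs, logfs, minix and ncpfs below. As a rough sketch of the pattern (hypothetical filesystem with a fixed in-memory table; only dir_emit_dots(), dir_emit() and ctx->pos from the new API are assumed):

static const struct { const char *name; ino_t ino; } demo_entries[] = {
        { "alpha", 11 }, { "beta", 12 },
};

static int demo_iterate(struct file *file, struct dir_context *ctx)
{
        /* positions 0 and 1 are "." and ".."; dir_emit_dots() emits them
         * as needed and leaves ctx->pos at 2 */
        if (!dir_emit_dots(file, ctx))
                return 0;

        while (ctx->pos - 2 < ARRAY_SIZE(demo_entries)) {
                const char *name = demo_entries[ctx->pos - 2].name;

                /* dir_emit() returns false once the caller's buffer is full;
                 * ctx->pos is only advanced on success so the next call
                 * resumes at the same entry */
                if (!dir_emit(ctx, name, strlen(name),
                              demo_entries[ctx->pos - 2].ino, DT_UNKNOWN))
                        return 0;
                ctx->pos++;
        }
        return 0;
}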
 
index 0ddbeceafc626f9471042f09f7a74ada4514ad12..9f4ed13d9f152bff068f68528fc2b1055ee9255c 100644 (file)
@@ -3002,9 +3002,9 @@ static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
  * return: offset = (pn, index) of start entry
  *     of next jfs_readdir()/dtRead()
  */
-int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+int jfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *ip = file_inode(filp);
+       struct inode *ip = file_inode(file);
        struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
        int rc = 0;
        loff_t dtpos;   /* legacy OS/2 style position */
@@ -3033,7 +3033,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        int overflow, fix_page, page_fixed = 0;
        static int unique_pos = 2;      /* If we can't fix broken index */
 
-       if (filp->f_pos == DIREND)
+       if (ctx->pos == DIREND)
                return 0;
 
        if (DO_INDEX(ip)) {
@@ -3045,7 +3045,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                 */
                do_index = 1;
 
-               dir_index = (u32) filp->f_pos;
+               dir_index = (u32) ctx->pos;
 
                if (dir_index > 1) {
                        struct dir_table_slot dirtab_slot;
@@ -3053,25 +3053,25 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        if (dtEmpty(ip) ||
                            (dir_index >= JFS_IP(ip)->next_index)) {
                                /* Stale position.  Directory has shrunk */
-                               filp->f_pos = DIREND;
+                               ctx->pos = DIREND;
                                return 0;
                        }
                      repeat:
                        rc = read_index(ip, dir_index, &dirtab_slot);
                        if (rc) {
-                               filp->f_pos = DIREND;
+                               ctx->pos = DIREND;
                                return rc;
                        }
                        if (dirtab_slot.flag == DIR_INDEX_FREE) {
                                if (loop_count++ > JFS_IP(ip)->next_index) {
                                        jfs_err("jfs_readdir detected "
                                                   "infinite loop!");
-                                       filp->f_pos = DIREND;
+                                       ctx->pos = DIREND;
                                        return 0;
                                }
                                dir_index = le32_to_cpu(dirtab_slot.addr2);
                                if (dir_index == -1) {
-                                       filp->f_pos = DIREND;
+                                       ctx->pos = DIREND;
                                        return 0;
                                }
                                goto repeat;
@@ -3080,13 +3080,13 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        index = dirtab_slot.slot;
                        DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
                        if (rc) {
-                               filp->f_pos = DIREND;
+                               ctx->pos = DIREND;
                                return 0;
                        }
                        if (p->header.flag & BT_INTERNAL) {
                                jfs_err("jfs_readdir: bad index table");
                                DT_PUTPAGE(mp);
-                               filp->f_pos = -1;
+                               ctx->pos = -1;
                                return 0;
                        }
                } else {
@@ -3094,23 +3094,22 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                /*
                                 * self "."
                                 */
-                               filp->f_pos = 0;
-                               if (filldir(dirent, ".", 1, 0, ip->i_ino,
-                                           DT_DIR))
+                               ctx->pos = 0;
+                               if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
                                        return 0;
                        }
                        /*
                         * parent ".."
                         */
-                       filp->f_pos = 1;
-                       if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
+                       ctx->pos = 1;
+                       if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
                                return 0;
 
                        /*
                         * Find first entry of left-most leaf
                         */
                        if (dtEmpty(ip)) {
-                               filp->f_pos = DIREND;
+                               ctx->pos = DIREND;
                                return 0;
                        }
 
@@ -3128,23 +3127,19 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                 * pn > 0:              Real entries, pn=1 -> leftmost page
                 * pn = index = -1:     No more entries
                 */
-               dtpos = filp->f_pos;
+               dtpos = ctx->pos;
                if (dtpos == 0) {
                        /* build "." entry */
-
-                       if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
-                                   DT_DIR))
+                       if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
                                return 0;
                        dtoffset->index = 1;
-                       filp->f_pos = dtpos;
+                       ctx->pos = dtpos;
                }
 
                if (dtoffset->pn == 0) {
                        if (dtoffset->index == 1) {
                                /* build ".." entry */
-
-                               if (filldir(dirent, "..", 2, filp->f_pos,
-                                           PARENT(ip), DT_DIR))
+                               if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
                                        return 0;
                        } else {
                                jfs_err("jfs_readdir called with "
@@ -3152,18 +3147,18 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        }
                        dtoffset->pn = 1;
                        dtoffset->index = 0;
-                       filp->f_pos = dtpos;
+                       ctx->pos = dtpos;
                }
 
                if (dtEmpty(ip)) {
-                       filp->f_pos = DIREND;
+                       ctx->pos = DIREND;
                        return 0;
                }
 
-               if ((rc = dtReadNext(ip, &filp->f_pos, &btstack))) {
+               if ((rc = dtReadNext(ip, &ctx->pos, &btstack))) {
                        jfs_err("jfs_readdir: unexpected rc = %d "
                                "from dtReadNext", rc);
-                       filp->f_pos = DIREND;
+                       ctx->pos = DIREND;
                        return 0;
                }
                /* get start leaf page and index */
@@ -3171,7 +3166,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 
                /* offset beyond directory eof ? */
                if (bn < 0) {
-                       filp->f_pos = DIREND;
+                       ctx->pos = DIREND;
                        return 0;
                }
        }
@@ -3180,7 +3175,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        if (dirent_buf == 0) {
                DT_PUTPAGE(mp);
                jfs_warn("jfs_readdir: __get_free_page failed!");
-               filp->f_pos = DIREND;
+               ctx->pos = DIREND;
                return -ENOMEM;
        }
 
@@ -3295,9 +3290,9 @@ skip_one:
 
                jfs_dirent = (struct jfs_dirent *) dirent_buf;
                while (jfs_dirents--) {
-                       filp->f_pos = jfs_dirent->position;
-                       if (filldir(dirent, jfs_dirent->name,
-                                   jfs_dirent->name_len, filp->f_pos,
+                       ctx->pos = jfs_dirent->position;
+                       if (!dir_emit(ctx, jfs_dirent->name,
+                                   jfs_dirent->name_len,
                                    jfs_dirent->ino, DT_UNKNOWN))
                                goto out;
                        jfs_dirent = next_jfs_dirent(jfs_dirent);
@@ -3309,7 +3304,7 @@ skip_one:
                }
 
                if (!overflow && (bn == 0)) {
-                       filp->f_pos = DIREND;
+                       ctx->pos = DIREND;
                        break;
                }
 
index 2545bb317235d2c6423c6cefa97afd73ed763107..fd4169e6e6984f0ef0fac00ee3b7c43771b4f143 100644 (file)
@@ -265,5 +265,5 @@ extern int dtDelete(tid_t tid, struct inode *ip, struct component_name * key,
 extern int dtModify(tid_t tid, struct inode *ip, struct component_name * key,
                    ino_t * orig_ino, ino_t new_ino, int flag);
 
-extern int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir);
+extern int jfs_readdir(struct file *file, struct dir_context *ctx);
 #endif                         /* !_H_JFS_DTREE */
index 6740d34cd82b802e948b8760fcf13954a8a12ad2..9e3aaff11f89cf9d67e2e1289fb1f47003c584c9 100644 (file)
@@ -571,9 +571,10 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
        return ret;
 }
 
-static void metapage_invalidatepage(struct page *page, unsigned long offset)
+static void metapage_invalidatepage(struct page *page, unsigned int offset,
+                                   unsigned int length)
 {
-       BUG_ON(offset);
+       BUG_ON(offset || length < PAGE_CACHE_SIZE);
 
        BUG_ON(PageWriteback(page));
 
index 3b91a7ad60862008f50c67526b7854f938aadc19..89186b7b9002145d268aa7054559b384422dd1d2 100644 (file)
@@ -1529,7 +1529,7 @@ const struct inode_operations jfs_dir_inode_operations = {
 
 const struct file_operations jfs_dir_operations = {
        .read           = generic_read_dir,
-       .readdir        = jfs_readdir,
+       .iterate        = jfs_readdir,
        .fsync          = jfs_fsync,
        .unlocked_ioctl = jfs_ioctl,
 #ifdef CONFIG_COMPAT
index 916da8c4158b0c01d834ead4b056b014eb1797a1..c3a0837fb861d8d403e6b3b7fadd4056aa42df79 100644 (file)
@@ -135,60 +135,40 @@ static inline unsigned char dt_type(struct inode *inode)
  * both impossible due to the lock on directory.
  */
 
-int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+int dcache_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct dentry *cursor = filp->private_data;
+       struct dentry *dentry = file->f_path.dentry;
+       struct dentry *cursor = file->private_data;
        struct list_head *p, *q = &cursor->d_u.d_child;
-       ino_t ino;
-       int i = filp->f_pos;
 
-       switch (i) {
-               case 0:
-                       ino = dentry->d_inode->i_ino;
-                       if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
-                               break;
-                       filp->f_pos++;
-                       i++;
-                       /* fallthrough */
-               case 1:
-                       ino = parent_ino(dentry);
-                       if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
-                               break;
-                       filp->f_pos++;
-                       i++;
-                       /* fallthrough */
-               default:
-                       spin_lock(&dentry->d_lock);
-                       if (filp->f_pos == 2)
-                               list_move(q, &dentry->d_subdirs);
-
-                       for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
-                               struct dentry *next;
-                               next = list_entry(p, struct dentry, d_u.d_child);
-                               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
-                               if (!simple_positive(next)) {
-                                       spin_unlock(&next->d_lock);
-                                       continue;
-                               }
+       if (!dir_emit_dots(file, ctx))
+               return 0;
+       spin_lock(&dentry->d_lock);
+       if (ctx->pos == 2)
+               list_move(q, &dentry->d_subdirs);
+
+       for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
+               struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
+               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+               if (!simple_positive(next)) {
+                       spin_unlock(&next->d_lock);
+                       continue;
+               }
 
-                               spin_unlock(&next->d_lock);
-                               spin_unlock(&dentry->d_lock);
-                               if (filldir(dirent, next->d_name.name, 
-                                           next->d_name.len, filp->f_pos, 
-                                           next->d_inode->i_ino, 
-                                           dt_type(next->d_inode)) < 0)
-                                       return 0;
-                               spin_lock(&dentry->d_lock);
-                               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
-                               /* next is still alive */
-                               list_move(q, p);
-                               spin_unlock(&next->d_lock);
-                               p = q;
-                               filp->f_pos++;
-                       }
-                       spin_unlock(&dentry->d_lock);
+               spin_unlock(&next->d_lock);
+               spin_unlock(&dentry->d_lock);
+               if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
+                             next->d_inode->i_ino, dt_type(next->d_inode)))
+                       return 0;
+               spin_lock(&dentry->d_lock);
+               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+               /* next is still alive */
+               list_move(q, p);
+               spin_unlock(&next->d_lock);
+               p = q;
+               ctx->pos++;
        }
+       spin_unlock(&dentry->d_lock);
        return 0;
 }
 
@@ -202,7 +182,7 @@ const struct file_operations simple_dir_operations = {
        .release        = dcache_dir_close,
        .llseek         = dcache_dir_lseek,
        .read           = generic_read_dir,
-       .readdir        = dcache_readdir,
+       .iterate        = dcache_readdir,
        .fsync          = noop_fsync,
 };
 
index b82751082112c5e95d3cf24f21580db334c5c03f..6bdc347008f5eb15d2d60ea9750202e72d380f18 100644 (file)
@@ -281,17 +281,23 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
 
 /* FIXME: readdir currently has its own dir_walk code.  I don't see a good
  * way to combine the two copies */
-#define IMPLICIT_NODES 2
-static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
+static int logfs_readdir(struct file *file, struct dir_context *ctx)
 {
        struct inode *dir = file_inode(file);
-       loff_t pos = file->f_pos - IMPLICIT_NODES;
+       loff_t pos;
        struct page *page;
        struct logfs_disk_dentry *dd;
-       int full;
 
+       if (ctx->pos < 0)
+               return -EINVAL;
+
+       if (!dir_emit_dots(file, ctx))
+               return 0;
+
+       pos = ctx->pos - 2;
        BUG_ON(pos < 0);
-       for (;; pos++) {
+       for (;; pos++, ctx->pos++) {
+               bool full;
                if (beyond_eof(dir, pos))
                        break;
                if (!logfs_exist_block(dir, pos)) {
@@ -306,42 +312,17 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
                dd = kmap(page);
                BUG_ON(dd->namelen == 0);
 
-               full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
-                               pos, be64_to_cpu(dd->ino), dd->type);
+               full = !dir_emit(ctx, (char *)dd->name,
+                               be16_to_cpu(dd->namelen),
+                               be64_to_cpu(dd->ino), dd->type);
                kunmap(page);
                page_cache_release(page);
                if (full)
                        break;
        }
-
-       file->f_pos = pos + IMPLICIT_NODES;
        return 0;
 }
 
-static int logfs_readdir(struct file *file, void *buf, filldir_t filldir)
-{
-       struct inode *inode = file_inode(file);
-       ino_t pino = parent_ino(file->f_dentry);
-       int err;
-
-       if (file->f_pos < 0)
-               return -EINVAL;
-
-       if (file->f_pos == 0) {
-               if (filldir(buf, ".", 1, 1, inode->i_ino, DT_DIR) < 0)
-                       return 0;
-               file->f_pos++;
-       }
-       if (file->f_pos == 1) {
-               if (filldir(buf, "..", 2, 2, pino, DT_DIR) < 0)
-                       return 0;
-               file->f_pos++;
-       }
-
-       err = __logfs_readdir(file, buf, filldir);
-       return err;
-}
-
 static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name)
 {
        dd->namelen = cpu_to_be16(name->len);
@@ -814,7 +795,7 @@ const struct inode_operations logfs_dir_iops = {
 const struct file_operations logfs_dir_fops = {
        .fsync          = logfs_fsync,
        .unlocked_ioctl = logfs_ioctl,
-       .readdir        = logfs_readdir,
+       .iterate        = logfs_readdir,
        .read           = generic_read_dir,
        .llseek         = default_llseek,
 };
index c2219a6dd3c8857620dd0a42b7b0f88c88e29828..57914fc32b62538f43909d35ffc031742b98a881 100644 (file)
@@ -159,7 +159,8 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc)
        return __logfs_writepage(page);
 }
 
-static void logfs_invalidatepage(struct page *page, unsigned long offset)
+static void logfs_invalidatepage(struct page *page, unsigned int offset,
+                                unsigned int length)
 {
        struct logfs_block *block = logfs_block(page);
 
index 038da0991794a39962fac3d4ef7ed5b18008c6f9..d448a777166b71bc21df131c0c32a462d97b5efe 100644 (file)
@@ -884,7 +884,8 @@ static struct logfs_area *alloc_area(struct super_block *sb)
        return area;
 }
 
-static void map_invalidatepage(struct page *page, unsigned long l)
+static void map_invalidatepage(struct page *page, unsigned int o,
+                              unsigned int l)
 {
        return;
 }
index a9ed6f36e6ead1d32bf754b50d5eaed3068d6cf2..08c442902fcdbf9ec5a9039621f8b8900ea6bf0e 100644 (file)
 typedef struct minix_dir_entry minix_dirent;
 typedef struct minix3_dir_entry minix3_dirent;
 
-static int minix_readdir(struct file *, void *, filldir_t);
+static int minix_readdir(struct file *, struct dir_context *);
 
 const struct file_operations minix_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = minix_readdir,
+       .iterate        = minix_readdir,
        .fsync          = generic_file_fsync,
 };
 
@@ -82,22 +82,23 @@ static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
        return (void*)((char*)de + sbi->s_dirsize);
 }
 
-static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
+static int minix_readdir(struct file *file, struct dir_context *ctx)
 {
-       unsigned long pos = filp->f_pos;
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
-       unsigned offset = pos & ~PAGE_CACHE_MASK;
-       unsigned long n = pos >> PAGE_CACHE_SHIFT;
-       unsigned long npages = dir_pages(inode);
        struct minix_sb_info *sbi = minix_sb(sb);
        unsigned chunk_size = sbi->s_dirsize;
-       char *name;
-       __u32 inumber;
+       unsigned long npages = dir_pages(inode);
+       unsigned long pos = ctx->pos;
+       unsigned offset;
+       unsigned long n;
 
-       pos = (pos + chunk_size-1) & ~(chunk_size-1);
+       ctx->pos = pos = (pos + chunk_size-1) & ~(chunk_size-1);
        if (pos >= inode->i_size)
-               goto done;
+               return 0;
+
+       offset = pos & ~PAGE_CACHE_MASK;
+       n = pos >> PAGE_CACHE_SHIFT;
 
        for ( ; n < npages; n++, offset = 0) {
                char *p, *kaddr, *limit;
@@ -109,6 +110,8 @@ static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
                p = kaddr+offset;
                limit = kaddr + minix_last_byte(inode, n) - chunk_size;
                for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
+                       const char *name;
+                       __u32 inumber;
                        if (sbi->s_version == MINIX_V3) {
                                minix3_dirent *de3 = (minix3_dirent *)p;
                                name = de3->name;
@@ -119,24 +122,17 @@ static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
                                inumber = de->inode;
                        }
                        if (inumber) {
-                               int over;
-
                                unsigned l = strnlen(name, sbi->s_namelen);
-                               offset = p - kaddr;
-                               over = filldir(dirent, name, l,
-                                       (n << PAGE_CACHE_SHIFT) | offset,
-                                       inumber, DT_UNKNOWN);
-                               if (over) {
+                               if (!dir_emit(ctx, name, l,
+                                             inumber, DT_UNKNOWN)) {
                                        dir_put_page(page);
-                                       goto done;
+                                       return 0;
                                }
                        }
+                       ctx->pos += chunk_size;
                }
                dir_put_page(page);
        }
-
-done:
-       filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
        return 0;
 }
 
index 6792ce11f2bfc19757c0a5ac0061073a5b42a900..0e7f00298213f3249b34f8564609c762d2e765ed 100644 (file)
 
 #include "ncp_fs.h"
 
-static void ncp_read_volume_list(struct file *, void *, filldir_t,
+static void ncp_read_volume_list(struct file *, struct dir_context *,
                                struct ncp_cache_control *);
-static void ncp_do_readdir(struct file *, void *, filldir_t,
+static void ncp_do_readdir(struct file *, struct dir_context *,
                                struct ncp_cache_control *);
 
-static int ncp_readdir(struct file *, void *, filldir_t);
+static int ncp_readdir(struct file *, struct dir_context *);
 
 static int ncp_create(struct inode *, struct dentry *, umode_t, bool);
 static struct dentry *ncp_lookup(struct inode *, struct dentry *, unsigned int);
@@ -49,7 +49,7 @@ const struct file_operations ncp_dir_operations =
 {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = ncp_readdir,
+       .iterate        = ncp_readdir,
        .unlocked_ioctl = ncp_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ncp_compat_ioctl,
@@ -424,9 +424,9 @@ static time_t ncp_obtain_mtime(struct dentry *dentry)
        return ncp_date_dos2unix(i.modifyTime, i.modifyDate);
 }
 
-static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int ncp_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry *dentry = filp->f_path.dentry;
+       struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        struct ncp_server *server = NCP_SERVER(inode);
@@ -440,7 +440,7 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
 
        DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n",
                dentry->d_parent->d_name.name, dentry->d_name.name,
-               (int) filp->f_pos);
+               (int) ctx->pos);
 
        result = -EIO;
        /* Do not generate '.' and '..' when server is dead. */
@@ -448,16 +448,8 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
                goto out;
 
        result = 0;
-       if (filp->f_pos == 0) {
-               if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
-                       goto out;
-               filp->f_pos = 1;
-       }
-       if (filp->f_pos == 1) {
-               if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR))
-                       goto out;
-               filp->f_pos = 2;
-       }
+       if (!dir_emit_dots(file, ctx))
+               goto out;
 
        page = grab_cache_page(&inode->i_data, 0);
        if (!page)
@@ -469,7 +461,7 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
        if (!PageUptodate(page) || !ctl.head.eof)
                goto init_cache;
 
-       if (filp->f_pos == 2) {
+       if (ctx->pos == 2) {
                if (jiffies - ctl.head.time >= NCP_MAX_AGE(server))
                        goto init_cache;
 
@@ -479,10 +471,10 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        goto init_cache;
        }
 
-       if (filp->f_pos > ctl.head.end)
+       if (ctx->pos > ctl.head.end)
                goto finished;
 
-       ctl.fpos = filp->f_pos + (NCP_DIRCACHE_START - 2);
+       ctl.fpos = ctx->pos + (NCP_DIRCACHE_START - 2);
        ctl.ofs  = ctl.fpos / NCP_DIRCACHE_SIZE;
        ctl.idx  = ctl.fpos % NCP_DIRCACHE_SIZE;
 
@@ -497,21 +489,21 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
                }
                while (ctl.idx < NCP_DIRCACHE_SIZE) {
                        struct dentry *dent;
-                       int res;
+                       bool over;
 
                        dent = ncp_dget_fpos(ctl.cache->dentry[ctl.idx],
-                                               dentry, filp->f_pos);
+                                               dentry, ctx->pos);
                        if (!dent)
                                goto invalid_cache;
-                       res = filldir(dirent, dent->d_name.name,
-                                       dent->d_name.len, filp->f_pos,
+                       over = !dir_emit(ctx, dent->d_name.name,
+                                       dent->d_name.len,
                                        dent->d_inode->i_ino, DT_UNKNOWN);
                        dput(dent);
-                       if (res)
+                       if (over)
                                goto finished;
-                       filp->f_pos += 1;
+                       ctx->pos += 1;
                        ctl.idx += 1;
-                       if (filp->f_pos > ctl.head.end)
+                       if (ctx->pos > ctl.head.end)
                                goto finished;
                }
                if (ctl.page) {
@@ -548,9 +540,9 @@ init_cache:
        ctl.valid  = 1;
 read_really:
        if (ncp_is_server_root(inode)) {
-               ncp_read_volume_list(filp, dirent, filldir, &ctl);
+               ncp_read_volume_list(file, ctx, &ctl);
        } else {
-               ncp_do_readdir(filp, dirent, filldir, &ctl);
+               ncp_do_readdir(file, ctx, &ctl);
        }
        ctl.head.end = ctl.fpos - 1;
        ctl.head.eof = ctl.valid;
@@ -573,11 +565,11 @@ out:
 }
 
 static int
-ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
+ncp_fill_cache(struct file *file, struct dir_context *ctx,
                struct ncp_cache_control *ctrl, struct ncp_entry_info *entry,
                int inval_childs)
 {
-       struct dentry *newdent, *dentry = filp->f_path.dentry;
+       struct dentry *newdent, *dentry = file->f_path.dentry;
        struct inode *dir = dentry->d_inode;
        struct ncp_cache_control ctl = *ctrl;
        struct qstr qname;
@@ -666,15 +658,15 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
 end_advance:
        if (!valid)
                ctl.valid = 0;
-       if (!ctl.filled && (ctl.fpos == filp->f_pos)) {
+       if (!ctl.filled && (ctl.fpos == ctx->pos)) {
                if (!ino)
                        ino = find_inode_number(dentry, &qname);
                if (!ino)
                        ino = iunique(dir->i_sb, 2);
-               ctl.filled = filldir(dirent, qname.name, qname.len,
-                                    filp->f_pos, ino, DT_UNKNOWN);
+               ctl.filled = !dir_emit(ctx, qname.name, qname.len,
+                                    ino, DT_UNKNOWN);
                if (!ctl.filled)
-                       filp->f_pos += 1;
+                       ctx->pos += 1;
        }
        ctl.fpos += 1;
        ctl.idx  += 1;
@@ -683,10 +675,10 @@ end_advance:
 }
 
 static void
-ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir,
+ncp_read_volume_list(struct file *file, struct dir_context *ctx,
                        struct ncp_cache_control *ctl)
 {
-       struct dentry *dentry = filp->f_path.dentry;
+       struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct ncp_server *server = NCP_SERVER(inode);
        struct ncp_volume_info info;
@@ -694,7 +686,7 @@ ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir,
        int i;
 
        DPRINTK("ncp_read_volume_list: pos=%ld\n",
-                       (unsigned long) filp->f_pos);
+                       (unsigned long) ctx->pos);
 
        for (i = 0; i < NCP_NUMBER_OF_VOLUMES; i++) {
                int inval_dentry;
@@ -715,16 +707,16 @@ ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir,
                }
                inval_dentry = ncp_update_known_namespace(server, entry.i.volNumber, NULL);
                entry.volume = entry.i.volNumber;
-               if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, inval_dentry))
+               if (!ncp_fill_cache(file, ctx, ctl, &entry, inval_dentry))
                        return;
        }
 }
 
 static void
-ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir,
+ncp_do_readdir(struct file *file, struct dir_context *ctx,
                                                struct ncp_cache_control *ctl)
 {
-       struct dentry *dentry = filp->f_path.dentry;
+       struct dentry *dentry = file->f_path.dentry;
        struct inode *dir = dentry->d_inode;
        struct ncp_server *server = NCP_SERVER(dir);
        struct nw_search_sequence seq;
@@ -736,7 +728,7 @@ ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir,
 
        DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n",
                dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) filp->f_pos);
+               (unsigned long) ctx->pos);
        PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n",
                dentry->d_name.name, NCP_FINFO(dir)->volNumber,
                NCP_FINFO(dir)->dirEntNum);
@@ -778,7 +770,7 @@ ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir,
                        rpl += onerpl;
                        rpls -= onerpl;
                        entry.volume = entry.i.volNumber;
-                       if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, 0))
+                       if (!ncp_fill_cache(file, ctx, ctl, &entry, 0))
                                break;
                }
        } while (more);
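
ncpfs drops its hand-rolled "." and ".." emulation in favour of dir_emit_dots(). Judging by how the helper is used throughout these hunks, it behaves like the open-coded sequence it replaces; the function below is an illustrative paraphrase under that assumption, not the fs.h definition:

    static bool sketch_emit_dots(struct file *file, struct dir_context *ctx)
    {
            if (ctx->pos == 0) {
                    if (!dir_emit(ctx, ".", 1, file_inode(file)->i_ino, DT_DIR))
                            return false;
                    ctx->pos = 1;
            }
            if (ctx->pos == 1) {
                    if (!dir_emit(ctx, "..", 2, parent_ino(file->f_path.dentry), DT_DIR))
                            return false;
                    ctx->pos = 2;
            }
            return true;    /* dots done, the caller may emit real entries */
    }

The other mechanical change is the sense of the return value: filldir() returned non-zero to stop, dir_emit() returns false, hence the "bool over = !dir_emit(...)" dance in ncp_readdir().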
index e093e73178b71467aa15eafa1d5327714eff284e..5d051419527baafc06f2ec4644549469380a478d 100644 (file)
@@ -46,7 +46,7 @@
 
 static int nfs_opendir(struct inode *, struct file *);
 static int nfs_closedir(struct inode *, struct file *);
-static int nfs_readdir(struct file *, void *, filldir_t);
+static int nfs_readdir(struct file *, struct dir_context *);
 static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
 static loff_t nfs_llseek_dir(struct file *, loff_t, int);
 static void nfs_readdir_clear_array(struct page*);
@@ -54,7 +54,7 @@ static void nfs_readdir_clear_array(struct page*);
 const struct file_operations nfs_dir_operations = {
        .llseek         = nfs_llseek_dir,
        .read           = generic_read_dir,
-       .readdir        = nfs_readdir,
+       .iterate        = nfs_readdir,
        .open           = nfs_opendir,
        .release        = nfs_closedir,
        .fsync          = nfs_fsync_dir,
@@ -147,6 +147,7 @@ typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int);
 typedef struct {
        struct file     *file;
        struct page     *page;
+       struct dir_context *ctx;
        unsigned long   page_index;
        u64             *dir_cookie;
        u64             last_cookie;
@@ -252,7 +253,7 @@ out:
 static
 int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
 {
-       loff_t diff = desc->file->f_pos - desc->current_index;
+       loff_t diff = desc->ctx->pos - desc->current_index;
        unsigned int index;
 
        if (diff < 0)
@@ -289,7 +290,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
                            || (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))) {
                                ctx->duped = 0;
                                ctx->attr_gencount = nfsi->attr_gencount;
-                       } else if (new_pos < desc->file->f_pos) {
+                       } else if (new_pos < desc->ctx->pos) {
                                if (ctx->duped > 0
                                    && ctx->dup_cookie == *desc->dir_cookie) {
                                        if (printk_ratelimit()) {
@@ -307,7 +308,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
                                ctx->dup_cookie = *desc->dir_cookie;
                                ctx->duped = -1;
                        }
-                       desc->file->f_pos = new_pos;
+                       desc->ctx->pos = new_pos;
                        desc->cache_entry_index = i;
                        return 0;
                }
@@ -405,13 +406,13 @@ different:
 }
 
 static
-bool nfs_use_readdirplus(struct inode *dir, struct file *filp)
+bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx)
 {
        if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
                return false;
        if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
                return true;
-       if (filp->f_pos == 0)
+       if (ctx->pos == 0)
                return true;
        return false;
 }
@@ -702,8 +703,7 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
  * Once we've found the start of the dirent within a page: fill 'er up...
  */
 static 
-int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
-                  filldir_t filldir)
+int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
 {
        struct file     *file = desc->file;
        int i = 0;
@@ -721,13 +721,12 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
                struct nfs_cache_array_entry *ent;
 
                ent = &array->array[i];
-               if (filldir(dirent, ent->string.name, ent->string.len,
-                   file->f_pos, nfs_compat_user_ino64(ent->ino),
-                   ent->d_type) < 0) {
+               if (!dir_emit(desc->ctx, ent->string.name, ent->string.len,
+                   nfs_compat_user_ino64(ent->ino), ent->d_type)) {
                        desc->eof = 1;
                        break;
                }
-               file->f_pos++;
+               desc->ctx->pos++;
                if (i < (array->size-1))
                        *desc->dir_cookie = array->array[i+1].cookie;
                else
@@ -759,8 +758,7 @@ out:
  *      directory in the page cache by the time we get here.
  */
 static inline
-int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
-                    filldir_t filldir)
+int uncached_readdir(nfs_readdir_descriptor_t *desc)
 {
        struct page     *page = NULL;
        int             status;
@@ -785,7 +783,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
        if (status < 0)
                goto out_release;
 
-       status = nfs_do_filldir(desc, dirent, filldir);
+       status = nfs_do_filldir(desc);
 
  out:
        dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
@@ -800,35 +798,36 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
    last cookie cache takes care of the common case of reading the
    whole directory.
  */
-static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int nfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry   *dentry = filp->f_path.dentry;
+       struct dentry   *dentry = file->f_path.dentry;
        struct inode    *inode = dentry->d_inode;
        nfs_readdir_descriptor_t my_desc,
                        *desc = &my_desc;
-       struct nfs_open_dir_context *dir_ctx = filp->private_data;
+       struct nfs_open_dir_context *dir_ctx = file->private_data;
        int res;
 
        dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
                        dentry->d_parent->d_name.name, dentry->d_name.name,
-                       (long long)filp->f_pos);
+                       (long long)ctx->pos);
        nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);
 
        /*
-        * filp->f_pos points to the dirent entry number.
+        * ctx->pos points to the dirent entry number.
         * *desc->dir_cookie has the cookie for the next entry. We have
         * to either find the entry with the appropriate number or
         * revalidate the cookie.
         */
        memset(desc, 0, sizeof(*desc));
 
-       desc->file = filp;
+       desc->file = file;
+       desc->ctx = ctx;
        desc->dir_cookie = &dir_ctx->dir_cookie;
        desc->decode = NFS_PROTO(inode)->decode_dirent;
-       desc->plus = nfs_use_readdirplus(inode, filp) ? 1 : 0;
+       desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;
 
        nfs_block_sillyrename(dentry);
-       res = nfs_revalidate_mapping(inode, filp->f_mapping);
+       res = nfs_revalidate_mapping(inode, file->f_mapping);
        if (res < 0)
                goto out;
 
@@ -840,7 +839,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        /* This means either end of directory */
                        if (*desc->dir_cookie && desc->eof == 0) {
                                /* Or that the server has 'lost' a cookie */
-                               res = uncached_readdir(desc, dirent, filldir);
+                               res = uncached_readdir(desc);
                                if (res == 0)
                                        continue;
                        }
@@ -857,7 +856,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                if (res < 0)
                        break;
 
-               res = nfs_do_filldir(desc, dirent, filldir);
+               res = nfs_do_filldir(desc);
                if (res < 0)
                        break;
        } while (!desc->eof);
index a87a44f8411304218986faa171f40de1f9564cd2..6b4a79f4ad1d30a0ddf036df224ab80594274f38 100644 (file)
@@ -451,11 +451,13 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
  * - Called if either PG_private or PG_fscache is set on the page
  * - Caller holds page lock
  */
-static void nfs_invalidate_page(struct page *page, unsigned long offset)
+static void nfs_invalidate_page(struct page *page, unsigned int offset,
+                               unsigned int length)
 {
-       dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset);
+       dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
+                page, offset, length);
 
-       if (offset != 0)
+       if (offset != 0 || length < PAGE_CACHE_SIZE)
                return;
        /* Cancel any unstarted writes on this page */
        nfs_wb_page_cancel(page_file_mapping(page)->host, page);
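
Alongside the readdir work, this merge also changes the ->invalidatepage() address-space operation: it now takes an offset and a length, so a filesystem can tell partial-page from whole-page invalidation. The NFS hunk above simply refuses to do anything unless the whole page is going away. A sketch of the new hook shape for a hypothetical filesystem with the same policy (the examplefs_* names are illustrative):

    static void examplefs_invalidatepage(struct page *page, unsigned int offset,
                                         unsigned int length)
    {
            if (offset != 0 || length < PAGE_CACHE_SIZE)
                    return;         /* partial invalidation: keep private state */

            /* whole page: cancel pending writes, drop attached private data */
    }

    static const struct address_space_operations examplefs_aops = {
            /* ... */
            .invalidatepage = examplefs_invalidatepage,
    };

Journalled filesystems such as ocfs2 (further down) instead forward both arguments to jbd2_journal_invalidatepage().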
index 4e9a21db867ae60afcc14a6ca362e978495c4a5c..105a3b080d1236c24afed6267354705fd8da5d94 100644 (file)
@@ -240,11 +240,16 @@ struct name_list {
        struct list_head list;
 };
 
+struct nfs4_dir_ctx {
+       struct dir_context ctx;
+       struct list_head names;
+};
+
 static int
 nfsd4_build_namelist(void *arg, const char *name, int namlen,
                loff_t offset, u64 ino, unsigned int d_type)
 {
-       struct list_head *names = arg;
+       struct nfs4_dir_ctx *ctx = arg;
        struct name_list *entry;
 
        if (namlen != HEXDIR_LEN - 1)
@@ -254,7 +259,7 @@ nfsd4_build_namelist(void *arg, const char *name, int namlen,
                return -ENOMEM;
        memcpy(entry->name, name, HEXDIR_LEN - 1);
        entry->name[HEXDIR_LEN - 1] = '\0';
-       list_add(&entry->list, names);
+       list_add(&entry->list, &ctx->names);
        return 0;
 }
 
@@ -263,7 +268,10 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
 {
        const struct cred *original_cred;
        struct dentry *dir = nn->rec_file->f_path.dentry;
-       LIST_HEAD(names);
+       struct nfs4_dir_ctx ctx = {
+               .ctx.actor = nfsd4_build_namelist,
+               .names = LIST_HEAD_INIT(ctx.names)
+       };
        int status;
 
        status = nfs4_save_creds(&original_cred);
@@ -276,11 +284,11 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
                return status;
        }
 
-       status = vfs_readdir(nn->rec_file, nfsd4_build_namelist, &names);
+       status = iterate_dir(nn->rec_file, &ctx.ctx);
        mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
-       while (!list_empty(&names)) {
+       while (!list_empty(&ctx.names)) {
                struct name_list *entry;
-               entry = list_entry(names.next, struct name_list, list);
+               entry = list_entry(ctx.names.next, struct name_list, list);
                if (!status) {
                        struct dentry *dentry;
                        dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
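
For in-kernel directory walks the pattern is the mirror image: vfs_readdir(file, actor, private) becomes iterate_dir(file, &ctx), with the caller embedding struct dir_context as the first member of its private structure so the actor can cast the context pointer back to the containing type, exactly as nfsd4_build_namelist() does above. A condensed sketch of that pattern with hypothetical names (my_ctx, my_actor, count_entries), assuming the 3.11 convention that the actor still receives the context as its void * first argument:

    struct my_ctx {
            struct dir_context ctx;         /* kept first so the cast below works */
            unsigned int nr_entries;
    };

    static int my_actor(void *__ctx, const char *name, int namlen,
                        loff_t offset, u64 ino, unsigned int d_type)
    {
            struct my_ctx *mc = __ctx;

            mc->nr_entries++;
            return 0;                       /* non-zero stops the walk */
    }

    static int count_entries(struct file *dir)
    {
            struct my_ctx mc = { .ctx.actor = my_actor };
            int err = iterate_dir(dir, &mc.ctx);

            return err ? err : mc.nr_entries;
    }

nfsd_buffered_readdir() below uses the same trick, moving the .ctx.actor assignment into the declaration of its on-stack readdir_data.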
index 84ce601d80632ba1dd090cd319766cecbe4cac98..a6bc8a7423dba4c4544bd504bf5b34194a4c8d11 100644 (file)
@@ -1912,6 +1912,7 @@ struct buffered_dirent {
 };
 
 struct readdir_data {
+       struct dir_context ctx;
        char            *dirent;
        size_t          used;
        int             full;
@@ -1943,13 +1944,15 @@ static int nfsd_buffered_filldir(void *__buf, const char *name, int namlen,
 static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
                                    struct readdir_cd *cdp, loff_t *offsetp)
 {
-       struct readdir_data buf;
        struct buffered_dirent *de;
        int host_err;
        int size;
        loff_t offset;
+       struct readdir_data buf = {
+               .ctx.actor = nfsd_buffered_filldir,
+               .dirent = (void *)__get_free_page(GFP_KERNEL)
+       };
 
-       buf.dirent = (void *)__get_free_page(GFP_KERNEL);
        if (!buf.dirent)
                return nfserrno(-ENOMEM);
 
@@ -1963,7 +1966,7 @@ static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
                buf.used = 0;
                buf.full = 0;
 
-               host_err = vfs_readdir(file, nfsd_buffered_filldir, &buf);
+               host_err = iterate_dir(file, &buf.ctx);
                if (buf.full)
                        host_err = 0;
 
index f30b017740a7baeaee8a5394443d790f855653c8..197a63e9d10215b54e28179b7976e6c0b56b5384 100644 (file)
@@ -256,22 +256,18 @@ static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
        de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
 }
 
-static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int nilfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       loff_t pos = filp->f_pos;
-       struct inode *inode = file_inode(filp);
+       loff_t pos = ctx->pos;
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = dir_pages(inode);
 /*     unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
-       unsigned char *types = NULL;
-       int ret;
 
        if (pos > inode->i_size - NILFS_DIR_REC_LEN(1))
-               goto success;
-
-       types = nilfs_filetype_table;
+               return 0;
 
        for ( ; n < npages; n++, offset = 0) {
                char *kaddr, *limit;
@@ -281,9 +277,8 @@ static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                if (IS_ERR(page)) {
                        nilfs_error(sb, __func__, "bad page in #%lu",
                                    inode->i_ino);
-                       filp->f_pos += PAGE_CACHE_SIZE - offset;
-                       ret = -EIO;
-                       goto done;
+                       ctx->pos += PAGE_CACHE_SIZE - offset;
+                       return -EIO;
                }
                kaddr = page_address(page);
                de = (struct nilfs_dir_entry *)(kaddr + offset);
@@ -293,35 +288,28 @@ static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        if (de->rec_len == 0) {
                                nilfs_error(sb, __func__,
                                            "zero-length directory entry");
-                               ret = -EIO;
                                nilfs_put_page(page);
-                               goto done;
+                               return -EIO;
                        }
                        if (de->inode) {
-                               int over;
-                               unsigned char d_type = DT_UNKNOWN;
+                               unsigned char t;
 
-                               if (types && de->file_type < NILFS_FT_MAX)
-                                       d_type = types[de->file_type];
+                               if (de->file_type < NILFS_FT_MAX)
+                                       t = nilfs_filetype_table[de->file_type];
+                               else
+                                       t = DT_UNKNOWN;
 
-                               offset = (char *)de - kaddr;
-                               over = filldir(dirent, de->name, de->name_len,
-                                               (n<<PAGE_CACHE_SHIFT) | offset,
-                                               le64_to_cpu(de->inode), d_type);
-                               if (over) {
+                               if (!dir_emit(ctx, de->name, de->name_len,
+                                               le64_to_cpu(de->inode), t)) {
                                        nilfs_put_page(page);
-                                       goto success;
+                                       return 0;
                                }
                        }
-                       filp->f_pos += nilfs_rec_len_from_disk(de->rec_len);
+                       ctx->pos += nilfs_rec_len_from_disk(de->rec_len);
                }
                nilfs_put_page(page);
        }
-
-success:
-       ret = 0;
-done:
-       return ret;
+       return 0;
 }
 
 /*
@@ -678,7 +666,7 @@ not_empty:
 const struct file_operations nilfs_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = nilfs_readdir,
+       .iterate        = nilfs_readdir,
        .unlocked_ioctl = nilfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = nilfs_compat_ioctl,
index fa9c05f97af487c367e16b298d26176a6719d78f..d267ea6aa1a0ecc2f405fd4cdcca1bfdd4e66aa2 100644 (file)
@@ -1372,7 +1372,7 @@ retry_writepage:
                 * The page may have dirty, unmapped buffers.  Make them
                 * freeable here, so the page does not leak.
                 */
-               block_invalidatepage(page, 0);
+               block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
                unlock_page(page);
                ntfs_debug("Write outside i_size - truncated?");
                return 0;
index aa411c3f20e932e39eefeaf2cb6b4435a7a1aeaa..9e38dafa3bc78ec71acc7acec733d9cd52c4f40a 100644 (file)
@@ -1004,13 +1004,11 @@ dir_err_out:
 /**
  * ntfs_filldir - ntfs specific filldir method
  * @vol:       current ntfs volume
- * @fpos:      position in the directory
  * @ndir:      ntfs inode of current directory
  * @ia_page:   page in which the index allocation buffer @ie is in resides
  * @ie:                current index entry
  * @name:      buffer to use for the converted name
- * @dirent:    vfs filldir callback context
- * @filldir:   vfs filldir callback
+ * @actor:     what to feed the entries to
  *
  * Convert the Unicode @name to the loaded NLS and pass it to the @filldir
  * callback.
@@ -1024,12 +1022,12 @@ dir_err_out:
  * retake the lock if we are returning a non-zero value as ntfs_readdir()
  * would need to drop the lock immediately anyway.
  */
-static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
+static inline int ntfs_filldir(ntfs_volume *vol,
                ntfs_inode *ndir, struct page *ia_page, INDEX_ENTRY *ie,
-               u8 *name, void *dirent, filldir_t filldir)
+               u8 *name, struct dir_context *actor)
 {
        unsigned long mref;
-       int name_len, rc;
+       int name_len;
        unsigned dt_type;
        FILE_NAME_TYPE_FLAGS name_type;
 
@@ -1068,13 +1066,14 @@ static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
        if (ia_page)
                unlock_page(ia_page);
        ntfs_debug("Calling filldir for %s with len %i, fpos 0x%llx, inode "
-                       "0x%lx, DT_%s.", name, name_len, fpos, mref,
+                       "0x%lx, DT_%s.", name, name_len, actor->pos, mref,
                        dt_type == DT_DIR ? "DIR" : "REG");
-       rc = filldir(dirent, name, name_len, fpos, mref, dt_type);
+       if (!dir_emit(actor, name, name_len, mref, dt_type))
+               return 1;
        /* Relock the page but not if we are aborting ->readdir. */
-       if (!rc && ia_page)
+       if (ia_page)
                lock_page(ia_page);
-       return rc;
+       return 0;
 }
 
 /*
@@ -1097,11 +1096,11 @@ static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
  *            removes them again after the write is complete after which it 
  *            unlocks the page.
  */
-static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int ntfs_readdir(struct file *file, struct dir_context *actor)
 {
        s64 ia_pos, ia_start, prev_ia_pos, bmp_pos;
-       loff_t fpos, i_size;
-       struct inode *bmp_vi, *vdir = file_inode(filp);
+       loff_t i_size;
+       struct inode *bmp_vi, *vdir = file_inode(file);
        struct super_block *sb = vdir->i_sb;
        ntfs_inode *ndir = NTFS_I(vdir);
        ntfs_volume *vol = NTFS_SB(sb);
@@ -1116,33 +1115,16 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        u8 *kaddr, *bmp, *index_end;
        ntfs_attr_search_ctx *ctx;
 
-       fpos = filp->f_pos;
        ntfs_debug("Entering for inode 0x%lx, fpos 0x%llx.",
-                       vdir->i_ino, fpos);
+                       vdir->i_ino, actor->pos);
        rc = err = 0;
        /* Are we at end of dir yet? */
        i_size = i_size_read(vdir);
-       if (fpos >= i_size + vol->mft_record_size)
-               goto done;
+       if (actor->pos >= i_size + vol->mft_record_size)
+               return 0;
        /* Emulate . and .. for all directories. */
-       if (!fpos) {
-               ntfs_debug("Calling filldir for . with len 1, fpos 0x0, "
-                               "inode 0x%lx, DT_DIR.", vdir->i_ino);
-               rc = filldir(dirent, ".", 1, fpos, vdir->i_ino, DT_DIR);
-               if (rc)
-                       goto done;
-               fpos++;
-       }
-       if (fpos == 1) {
-               ntfs_debug("Calling filldir for .. with len 2, fpos 0x1, "
-                               "inode 0x%lx, DT_DIR.",
-                               (unsigned long)parent_ino(filp->f_path.dentry));
-               rc = filldir(dirent, "..", 2, fpos,
-                               parent_ino(filp->f_path.dentry), DT_DIR);
-               if (rc)
-                       goto done;
-               fpos++;
-       }
+       if (!dir_emit_dots(file, actor))
+               return 0;
        m = NULL;
        ctx = NULL;
        /*
@@ -1155,7 +1137,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                goto err_out;
        }
        /* Are we jumping straight into the index allocation attribute? */
-       if (fpos >= vol->mft_record_size)
+       if (actor->pos >= vol->mft_record_size)
                goto skip_index_root;
        /* Get hold of the mft record for the directory. */
        m = map_mft_record(ndir);
@@ -1170,7 +1152,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                goto err_out;
        }
        /* Get the offset into the index root attribute. */
-       ir_pos = (s64)fpos;
+       ir_pos = (s64)actor->pos;
        /* Find the index root attribute in the mft record. */
        err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
                        0, ctx);
@@ -1226,10 +1208,9 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                if (ir_pos > (u8*)ie - (u8*)ir)
                        continue;
                /* Advance the position even if going to skip the entry. */
-               fpos = (u8*)ie - (u8*)ir;
+               actor->pos = (u8*)ie - (u8*)ir;
                /* Submit the name to the filldir callback. */
-               rc = ntfs_filldir(vol, fpos, ndir, NULL, ie, name, dirent,
-                               filldir);
+               rc = ntfs_filldir(vol, ndir, NULL, ie, name, actor);
                if (rc) {
                        kfree(ir);
                        goto abort;
@@ -1242,12 +1223,12 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        if (!NInoIndexAllocPresent(ndir))
                goto EOD;
        /* Advance fpos to the beginning of the index allocation. */
-       fpos = vol->mft_record_size;
+       actor->pos = vol->mft_record_size;
 skip_index_root:
        kaddr = NULL;
        prev_ia_pos = -1LL;
        /* Get the offset into the index allocation attribute. */
-       ia_pos = (s64)fpos - vol->mft_record_size;
+       ia_pos = (s64)actor->pos - vol->mft_record_size;
        ia_mapping = vdir->i_mapping;
        ntfs_debug("Inode 0x%lx, getting index bitmap.", vdir->i_ino);
        bmp_vi = ntfs_attr_iget(vdir, AT_BITMAP, I30, 4);
@@ -1409,7 +1390,7 @@ find_next_index_buffer:
                if (ia_pos - ia_start > (u8*)ie - (u8*)ia)
                        continue;
                /* Advance the position even if going to skip the entry. */
-               fpos = (u8*)ie - (u8*)ia +
+               actor->pos = (u8*)ie - (u8*)ia +
                                (sle64_to_cpu(ia->index_block_vcn) <<
                                ndir->itype.index.vcn_size_bits) +
                                vol->mft_record_size;
@@ -1419,8 +1400,7 @@ find_next_index_buffer:
                 * before returning, unless a non-zero value is returned in
                 * which case the page is left unlocked.
                 */
-               rc = ntfs_filldir(vol, fpos, ndir, ia_page, ie, name, dirent,
-                               filldir);
+               rc = ntfs_filldir(vol, ndir, ia_page, ie, name, actor);
                if (rc) {
                        /* @ia_page is already unlocked in this case. */
                        ntfs_unmap_page(ia_page);
@@ -1439,18 +1419,9 @@ unm_EOD:
        iput(bmp_vi);
 EOD:
        /* We are finished, set fpos to EOD. */
-       fpos = i_size + vol->mft_record_size;
+       actor->pos = i_size + vol->mft_record_size;
 abort:
        kfree(name);
-done:
-#ifdef DEBUG
-       if (!rc)
-               ntfs_debug("EOD, fpos 0x%llx, returning 0.", fpos);
-       else
-               ntfs_debug("filldir returned %i, fpos 0x%llx, returning 0.",
-                               rc, fpos);
-#endif
-       filp->f_pos = fpos;
        return 0;
 err_out:
        if (bmp_page) {
@@ -1471,7 +1442,6 @@ iput_err_out:
        if (!err)
                err = -EIO;
        ntfs_debug("Failed. Returning error code %i.", -err);
-       filp->f_pos = fpos;
        return err;
 }
 
@@ -1571,7 +1541,7 @@ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end,
 const struct file_operations ntfs_dir_ops = {
        .llseek         = generic_file_llseek,  /* Seek inside directory. */
        .read           = generic_read_dir,     /* Return -EISDIR. */
-       .readdir        = ntfs_readdir,         /* Read directory contents. */
+       .iterate        = ntfs_readdir,         /* Read directory contents. */
 #ifdef NTFS_RW
        .fsync          = ntfs_dir_fsync,       /* Sync a directory to disk. */
        /*.aio_fsync    = ,*/                   /* Sync all outstanding async
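
ntfs_readdir() keeps its old position encoding, it just stores it in actor->pos now: 0 and 1 are the emulated dot entries, values below vol->mft_record_size are byte offsets into the index root attribute, anything above that is an offset into the index allocation attribute shifted up by mft_record_size, and i_size + mft_record_size marks end-of-directory. A small decoding sketch of that split as it appears in the hunks above (the helper name is hypothetical):

    /* Hypothetical helper mirroring the position split used by ntfs_readdir(). */
    static s64 ntfs_pos_to_ia_offset(ntfs_volume *vol, loff_t pos)
    {
            if (pos < vol->mft_record_size)
                    return -1;                      /* still in the index root */
            return (s64)pos - vol->mft_record_size; /* index allocation offset */
    }

Note also that ntfs_filldir() now returns 0/1 rather than the raw filldir return value, and only retakes the ia_page lock when iteration is going to continue.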
index 20dfec72e90330a9b613e2a78ee5b59874870925..79736a28d84f911dd66acca86c844912284ad9fa 100644 (file)
@@ -603,11 +603,12 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
  * from ext3.  PageChecked() bits have been removed as OCFS2 does not
  * do journalled data.
  */
-static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
+static void ocfs2_invalidatepage(struct page *page, unsigned int offset,
+                                unsigned int length)
 {
        journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
 
-       jbd2_journal_invalidatepage(journal, page, offset);
+       jbd2_journal_invalidatepage(journal, page, offset, length);
 }
 
 static int ocfs2_releasepage(struct page *page, gfp_t wait)
index f1e1aed8f638119db018797d93f977dd8d553cd7..eb760d8acd500c96bd45195ce2a2c79fb0942ed5 100644 (file)
@@ -1761,11 +1761,10 @@ bail:
 
 static int ocfs2_dir_foreach_blk_id(struct inode *inode,
                                    u64 *f_version,
-                                   loff_t *f_pos, void *priv,
-                                   filldir_t filldir, int *filldir_err)
+                                   struct dir_context *ctx)
 {
-       int ret, i, filldir_ret;
-       unsigned long offset = *f_pos;
+       int ret, i;
+       unsigned long offset = ctx->pos;
        struct buffer_head *di_bh = NULL;
        struct ocfs2_dinode *di;
        struct ocfs2_inline_data *data;
@@ -1781,8 +1780,7 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
        di = (struct ocfs2_dinode *)di_bh->b_data;
        data = &di->id2.i_data;
 
-       while (*f_pos < i_size_read(inode)) {
-revalidate:
+       while (ctx->pos < i_size_read(inode)) {
                /* If the dir block has changed since the last call to
                 * readdir(2), then we might be pointing to an invalid
                 * dirent right now.  Scan from the start of the block
@@ -1802,50 +1800,31 @@ revalidate:
                                        break;
                                i += le16_to_cpu(de->rec_len);
                        }
-                       *f_pos = offset = i;
+                       ctx->pos = offset = i;
                        *f_version = inode->i_version;
                }
 
-               de = (struct ocfs2_dir_entry *) (data->id_data + *f_pos);
-               if (!ocfs2_check_dir_entry(inode, de, di_bh, *f_pos)) {
+               de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
+               if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
                        /* On error, skip the f_pos to the end. */
-                       *f_pos = i_size_read(inode);
-                       goto out;
+                       ctx->pos = i_size_read(inode);
+                       break;
                }
                offset += le16_to_cpu(de->rec_len);
                if (le64_to_cpu(de->inode)) {
-                       /* We might block in the next section
-                        * if the data destination is
-                        * currently swapped out.  So, use a
-                        * version stamp to detect whether or
-                        * not the directory has been modified
-                        * during the copy operation.
-                        */
-                       u64 version = *f_version;
                        unsigned char d_type = DT_UNKNOWN;
 
                        if (de->file_type < OCFS2_FT_MAX)
                                d_type = ocfs2_filetype_table[de->file_type];
 
-                       filldir_ret = filldir(priv, de->name,
-                                             de->name_len,
-                                             *f_pos,
-                                             le64_to_cpu(de->inode),
-                                             d_type);
-                       if (filldir_ret) {
-                               if (filldir_err)
-                                       *filldir_err = filldir_ret;
-                               break;
-                       }
-                       if (version != *f_version)
-                               goto revalidate;
+                       if (!dir_emit(ctx, de->name, de->name_len,
+                                     le64_to_cpu(de->inode), d_type))
+                               goto out;
                }
-               *f_pos += le16_to_cpu(de->rec_len);
+               ctx->pos += le16_to_cpu(de->rec_len);
        }
-
 out:
        brelse(di_bh);
-
        return 0;
 }
 
@@ -1855,27 +1834,26 @@ out:
  */
 static int ocfs2_dir_foreach_blk_el(struct inode *inode,
                                    u64 *f_version,
-                                   loff_t *f_pos, void *priv,
-                                   filldir_t filldir, int *filldir_err)
+                                   struct dir_context *ctx,
+                                   bool persist)
 {
-       int error = 0;
        unsigned long offset, blk, last_ra_blk = 0;
-       int i, stored;
+       int i;
        struct buffer_head * bh, * tmp;
        struct ocfs2_dir_entry * de;
        struct super_block * sb = inode->i_sb;
        unsigned int ra_sectors = 16;
+       int stored = 0;
 
-       stored = 0;
        bh = NULL;
 
-       offset = (*f_pos) & (sb->s_blocksize - 1);
+       offset = ctx->pos & (sb->s_blocksize - 1);
 
-       while (!error && !stored && *f_pos < i_size_read(inode)) {
-               blk = (*f_pos) >> sb->s_blocksize_bits;
+       while (ctx->pos < i_size_read(inode)) {
+               blk = ctx->pos >> sb->s_blocksize_bits;
                if (ocfs2_read_dir_block(inode, blk, &bh, 0)) {
                        /* Skip the corrupt dirblock and keep trying */
-                       *f_pos += sb->s_blocksize - offset;
+                       ctx->pos += sb->s_blocksize - offset;
                        continue;
                }
 
@@ -1897,7 +1875,6 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
                        ra_sectors = 8;
                }
 
-revalidate:
                /* If the dir block has changed since the last call to
                 * readdir(2), then we might be pointing to an invalid
                 * dirent right now.  Scan from the start of the block
@@ -1917,93 +1894,64 @@ revalidate:
                                i += le16_to_cpu(de->rec_len);
                        }
                        offset = i;
-                       *f_pos = ((*f_pos) & ~(sb->s_blocksize - 1))
+                       ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
                                | offset;
                        *f_version = inode->i_version;
                }
 
-               while (!error && *f_pos < i_size_read(inode)
+               while (ctx->pos < i_size_read(inode)
                       && offset < sb->s_blocksize) {
                        de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
                        if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
                                /* On error, skip the f_pos to the
                                   next block. */
-                               *f_pos = ((*f_pos) | (sb->s_blocksize - 1)) + 1;
+                               ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
                                brelse(bh);
-                               goto out;
+                               continue;
                        }
-                       offset += le16_to_cpu(de->rec_len);
                        if (le64_to_cpu(de->inode)) {
-                               /* We might block in the next section
-                                * if the data destination is
-                                * currently swapped out.  So, use a
-                                * version stamp to detect whether or
-                                * not the directory has been modified
-                                * during the copy operation.
-                                */
-                               unsigned long version = *f_version;
                                unsigned char d_type = DT_UNKNOWN;
 
                                if (de->file_type < OCFS2_FT_MAX)
                                        d_type = ocfs2_filetype_table[de->file_type];
-                               error = filldir(priv, de->name,
+                               if (!dir_emit(ctx, de->name,
                                                de->name_len,
-                                               *f_pos,
                                                le64_to_cpu(de->inode),
-                                               d_type);
-                               if (error) {
-                                       if (filldir_err)
-                                               *filldir_err = error;
-                                       break;
+                                               d_type)) {
+                                       brelse(bh);
+                                       return 0;
                                }
-                               if (version != *f_version)
-                                       goto revalidate;
-                               stored ++;
+                               stored++;
                        }
-                       *f_pos += le16_to_cpu(de->rec_len);
+                       offset += le16_to_cpu(de->rec_len);
+                       ctx->pos += le16_to_cpu(de->rec_len);
                }
                offset = 0;
                brelse(bh);
                bh = NULL;
+               if (!persist && stored)
+                       break;
        }
-
-       stored = 0;
-out:
-       return stored;
+       return 0;
 }
 
 static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
-                                loff_t *f_pos, void *priv, filldir_t filldir,
-                                int *filldir_err)
+                                struct dir_context *ctx,
+                                bool persist)
 {
        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
-               return ocfs2_dir_foreach_blk_id(inode, f_version, f_pos, priv,
-                                               filldir, filldir_err);
-
-       return ocfs2_dir_foreach_blk_el(inode, f_version, f_pos, priv, filldir,
-                                       filldir_err);
+               return ocfs2_dir_foreach_blk_id(inode, f_version, ctx);
+       return ocfs2_dir_foreach_blk_el(inode, f_version, ctx, persist);
 }
 
 /*
  * This is intended to be called from inside other kernel functions,
  * so we fake some arguments.
  */
-int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
-                     filldir_t filldir)
+int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
 {
-       int ret = 0, filldir_err = 0;
        u64 version = inode->i_version;
-
-       while (*f_pos < i_size_read(inode)) {
-               ret = ocfs2_dir_foreach_blk(inode, &version, f_pos, priv,
-                                           filldir, &filldir_err);
-               if (ret || filldir_err)
-                       break;
-       }
-
-       if (ret > 0)
-               ret = -EIO;
-
+       ocfs2_dir_foreach_blk(inode, &version, ctx, true);
        return 0;
 }
 
@@ -2011,15 +1959,15 @@ int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
  * ocfs2_readdir()
  *
  */
-int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
+int ocfs2_readdir(struct file *file, struct dir_context *ctx)
 {
        int error = 0;
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        int lock_level = 0;
 
        trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
 
-       error = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
+       error = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level);
        if (lock_level && error >= 0) {
                /* We release EX lock which used to update atime
                 * and get PR lock again to reduce contention
@@ -2035,8 +1983,7 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
                goto bail_nolock;
        }
 
-       error = ocfs2_dir_foreach_blk(inode, &filp->f_version, &filp->f_pos,
-                                     dirent, filldir, NULL);
+       error = ocfs2_dir_foreach_blk(inode, &file->f_version, ctx, false);
 
        ocfs2_inode_unlock(inode, lock_level);
        if (error)
@@ -2120,6 +2067,7 @@ bail:
 }
 
 struct ocfs2_empty_dir_priv {
+       struct dir_context ctx;
        unsigned seen_dot;
        unsigned seen_dot_dot;
        unsigned seen_other;
@@ -2204,8 +2152,9 @@ out:
 int ocfs2_empty_dir(struct inode *inode)
 {
        int ret;
-       loff_t start = 0;
-       struct ocfs2_empty_dir_priv priv;
+       struct ocfs2_empty_dir_priv priv = {
+               .ctx.actor = ocfs2_empty_dir_filldir
+       };
 
        memset(&priv, 0, sizeof(priv));
 
@@ -2219,7 +2168,7 @@ int ocfs2_empty_dir(struct inode *inode)
                 */
        }
 
-       ret = ocfs2_dir_foreach(inode, &start, &priv, ocfs2_empty_dir_filldir);
+       ret = ocfs2_dir_foreach(inode, &priv.ctx);
        if (ret)
                mlog_errno(ret);
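
ocfs2 keeps a single walk engine for both consumers. ocfs2_readdir() calls ocfs2_dir_foreach_blk() with persist == false, so the block loop stops after the first block that produced entries, matching the old "stored" behaviour for getdents() batching, while in-kernel users such as ocfs2_empty_dir() above and the orphan scan further down go through ocfs2_dir_foreach(), which passes persist == true and walks the whole directory in one call. The filldir_err plumbing and the version-stamp "revalidate" loops are gone as well; iteration now simply stops as soon as dir_emit() reports a full buffer.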
 
index e683f3deb64503a3167198a88d37070cf1ee0eba..f0344b75b14d26631993f260bc2e9f8aa9204892 100644 (file)
@@ -92,9 +92,8 @@ int ocfs2_find_files_on_disk(const char *name,
                             struct ocfs2_dir_lookup_result *res);
 int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
                               int namelen, u64 *blkno);
-int ocfs2_readdir(struct file *filp, void *dirent, filldir_t filldir);
-int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
-                     filldir_t filldir);
+int ocfs2_readdir(struct file *file, struct dir_context *ctx);
+int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx);
 int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
                                 struct inode *dir,
                                 struct buffer_head *parent_fe_bh,
index ff54014a24ecd58511c37a16b33f5e27a1e3e6dd..8a38714f1d92ec98b197e28e1a06adcce6e6b582 100644 (file)
@@ -2712,7 +2712,7 @@ const struct file_operations ocfs2_fops = {
 const struct file_operations ocfs2_dops = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = ocfs2_readdir,
+       .iterate        = ocfs2_readdir,
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_dir_release,
        .open           = ocfs2_dir_open,
@@ -2759,7 +2759,7 @@ const struct file_operations ocfs2_fops_no_plocks = {
 const struct file_operations ocfs2_dops_no_plocks = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = ocfs2_readdir,
+       .iterate        = ocfs2_readdir,
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_dir_release,
        .open           = ocfs2_dir_open,
index 8eccfabcd12ed7a4b229499445585e662a977479..242170d83971a9bef488197111154db6c75cba0b 100644 (file)
@@ -1941,6 +1941,7 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
 }
 
 struct ocfs2_orphan_filldir_priv {
+       struct dir_context      ctx;
        struct inode            *head;
        struct ocfs2_super      *osb;
 };
@@ -1977,11 +1978,11 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
 {
        int status;
        struct inode *orphan_dir_inode = NULL;
-       struct ocfs2_orphan_filldir_priv priv;
-       loff_t pos = 0;
-
-       priv.osb = osb;
-       priv.head = *head;
+       struct ocfs2_orphan_filldir_priv priv = {
+               .ctx.actor = ocfs2_orphan_filldir,
+               .osb = osb,
+               .head = *head
+       };
 
        orphan_dir_inode = ocfs2_get_system_file_inode(osb,
                                                       ORPHAN_DIR_SYSTEM_INODE,
@@ -1999,8 +2000,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
                goto out;
        }
 
-       status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv,
-                                  ocfs2_orphan_filldir);
+       status = ocfs2_dir_foreach(orphan_dir_inode, &priv.ctx);
        if (status) {
                mlog_errno(status);
                goto out_cluster;
index acbaebcad3a860c7c5a1d4a2ceb78ef10be2eae3..1b8e9e8405b230169318a1d60df41c5b0b4f502d 100644 (file)
@@ -327,26 +327,23 @@ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
        return is_bad;
 }
 
-static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir,
+static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx,
                u64 fsblock, int hindex)
 {
-       struct inode *dir = file_inode(filp);
-       struct buffer_head *bh;
-       struct omfs_inode *oi;
-       u64 self;
-       int res = 0;
-       unsigned char d_type;
-
        /* follow chain in this bucket */
        while (fsblock != ~0) {
-               bh = omfs_bread(dir->i_sb, fsblock);
+               struct buffer_head *bh = omfs_bread(dir->i_sb, fsblock);
+               struct omfs_inode *oi;
+               u64 self;
+               unsigned char d_type;
+
                if (!bh)
-                       goto out;
+                       return true;
 
                oi = (struct omfs_inode *) bh->b_data;
                if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, fsblock)) {
                        brelse(bh);
-                       goto out;
+                       return true;
                }
 
                self = fsblock;
@@ -361,15 +358,16 @@ static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir,
 
                d_type = (oi->i_type == OMFS_DIR) ? DT_DIR : DT_REG;
 
-               res = filldir(dirent, oi->i_name, strnlen(oi->i_name,
-                       OMFS_NAMELEN), filp->f_pos, self, d_type);
+               if (!dir_emit(ctx, oi->i_name,
+                             strnlen(oi->i_name, OMFS_NAMELEN),
+                             self, d_type)) {
+                       brelse(bh);
+                       return false;
+               }
                brelse(bh);
-               if (res < 0)
-                       break;
-               filp->f_pos++;
+               ctx->pos++;
        }
-out:
-       return res;
+       return true;
 }
 
 static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -403,60 +401,44 @@ out:
        return err;
 }
 
-static int omfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int omfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *dir = file_inode(filp);
+       struct inode *dir = file_inode(file);
        struct buffer_head *bh;
-       loff_t offset, res;
+       __be64 *p;
        unsigned int hchain, hindex;
        int nbuckets;
-       u64 fsblock;
-       int ret = -EINVAL;
-
-       if (filp->f_pos >> 32)
-               goto success;
-
-       switch ((unsigned long) filp->f_pos) {
-       case 0:
-               if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0)
-                       goto success;
-               filp->f_pos++;
-               /* fall through */
-       case 1:
-               if (filldir(dirent, "..", 2, 1,
-                   parent_ino(filp->f_dentry), DT_DIR) < 0)
-                       goto success;
-               filp->f_pos = 1 << 20;
-               /* fall through */
+
+       if (ctx->pos >> 32)
+               return -EINVAL;
+
+       if (ctx->pos < 1 << 20) {
+               if (!dir_emit_dots(file, ctx))
+                       return 0;
+               ctx->pos = 1 << 20;
        }
 
        nbuckets = (dir->i_size - OMFS_DIR_START) / 8;
 
        /* high 12 bits store bucket + 1 and low 20 bits store hash index */
-       hchain = (filp->f_pos >> 20) - 1;
-       hindex = filp->f_pos & 0xfffff;
+       hchain = (ctx->pos >> 20) - 1;
+       hindex = ctx->pos & 0xfffff;
 
        bh = omfs_bread(dir->i_sb, dir->i_ino);
        if (!bh)
-               goto out;
+               return -EINVAL;
 
-       offset = OMFS_DIR_START + hchain * 8;
+       p = (__be64 *)(bh->b_data + OMFS_DIR_START) + hchain;
 
-       for (; hchain < nbuckets; hchain++, offset += 8) {
-               fsblock = be64_to_cpu(*((__be64 *) &bh->b_data[offset]));
-
-               res = omfs_fill_chain(filp, dirent, filldir, fsblock, hindex);
-               hindex = 0;
-               if (res < 0)
+       for (; hchain < nbuckets; hchain++) {
+               __u64 fsblock = be64_to_cpu(*p++);
+               if (!omfs_fill_chain(dir, ctx, fsblock, hindex))
                        break;
-
-               filp->f_pos = (hchain+2) << 20;
+               hindex = 0;
+               ctx->pos = (hchain+2) << 20;
        }
        brelse(bh);
-success:
-       ret = 0;
-out:
-       return ret;
+       return 0;
 }
 
 const struct inode_operations omfs_dir_inops = {
@@ -470,6 +452,6 @@ const struct inode_operations omfs_dir_inops = {
 
 const struct file_operations omfs_dir_operations = {
        .read = generic_read_dir,
-       .readdir = omfs_readdir,
+       .iterate = omfs_readdir,
        .llseek = generic_file_llseek,
 };
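
omfs packs its cursor the same way as before, only now into ctx->pos: the bits above bit 20 hold bucket + 1, the low 20 bits hold the index within the hash chain, 1 << 20 means "dots emitted, start of bucket 0", and a position with any of the upper 32 bits set is rejected with -EINVAL. Hypothetical encode/decode helpers mirroring the arithmetic in omfs_readdir() above:

    static inline loff_t omfs_pos(unsigned int hchain, unsigned int hindex)
    {
            return ((loff_t)hchain + 1) << 20 | (hindex & 0xfffff);
    }

    static inline unsigned int omfs_pos_bucket(loff_t pos)
    {
            return (pos >> 20) - 1;
    }

    static inline unsigned int omfs_pos_index(loff_t pos)
    {
            return pos & 0xfffff;
    }

The other change worth noting is that omfs_fill_chain() now returns bool ("keep going") and takes the directory inode directly instead of digging it out of the file.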
index 75885ffde44e58a967799d3b461181e74bb8fd9c..8c0ceb8dd1f709a6b3b6427eabc28718f23f9a50 100644 (file)
@@ -162,11 +162,11 @@ static const struct file_operations openpromfs_prop_ops = {
        .release        = seq_release,
 };
 
-static int openpromfs_readdir(struct file *, void *, filldir_t);
+static int openpromfs_readdir(struct file *, struct dir_context *);
 
 static const struct file_operations openprom_operations = {
        .read           = generic_read_dir,
-       .readdir        = openpromfs_readdir,
+       .iterate        = openpromfs_readdir,
        .llseek         = generic_file_llseek,
 };
 
@@ -260,71 +260,64 @@ found:
        return NULL;
 }
 
-static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+static int openpromfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct op_inode_info *oi = OP_I(inode);
        struct device_node *dp = oi->u.node;
        struct device_node *child;
        struct property *prop;
-       unsigned int ino;
        int i;
 
        mutex_lock(&op_mutex);
        
-       ino = inode->i_ino;
-       i = filp->f_pos;
-       switch (i) {
-       case 0:
-               if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+       if (ctx->pos == 0) {
+               if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR))
                        goto out;
-               i++;
-               filp->f_pos++;
-               /* fall thru */
-       case 1:
-               if (filldir(dirent, "..", 2, i,
+               ctx->pos = 1;
+       }
+       if (ctx->pos == 1) {
+               if (!dir_emit(ctx, "..", 2,
                            (dp->parent == NULL ?
                             OPENPROM_ROOT_INO :
-                            dp->parent->unique_id), DT_DIR) < 0) 
+                            dp->parent->unique_id), DT_DIR))
                        goto out;
-               i++;
-               filp->f_pos++;
-               /* fall thru */
-       default:
-               i -= 2;
-
-               /* First, the children nodes as directories.  */
-               child = dp->child;
-               while (i && child) {
-                       child = child->sibling;
-                       i--;
-               }
-               while (child) {
-                       if (filldir(dirent,
-                                   child->path_component_name,
-                                   strlen(child->path_component_name),
-                                   filp->f_pos, child->unique_id, DT_DIR) < 0)
-                               goto out;
-
-                       filp->f_pos++;
-                       child = child->sibling;
-               }
+               ctx->pos = 2;
+       }
+       i = ctx->pos - 2;
 
-               /* Next, the properties as files.  */
-               prop = dp->properties;
-               while (i && prop) {
-                       prop = prop->next;
-                       i--;
-               }
-               while (prop) {
-                       if (filldir(dirent, prop->name, strlen(prop->name),
-                                   filp->f_pos, prop->unique_id, DT_REG) < 0)
-                               goto out;
+       /* First, the children nodes as directories.  */
+       child = dp->child;
+       while (i && child) {
+               child = child->sibling;
+               i--;
+       }
+       while (child) {
+               if (!dir_emit(ctx,
+                           child->path_component_name,
+                           strlen(child->path_component_name),
+                           child->unique_id, DT_DIR))
+                       goto out;
 
-                       filp->f_pos++;
-                       prop = prop->next;
-               }
+               ctx->pos++;
+               child = child->sibling;
+       }
+
+       /* Next, the properties as files.  */
+       prop = dp->properties;
+       while (i && prop) {
+               prop = prop->next;
+               i--;
        }
+       while (prop) {
+               if (!dir_emit(ctx, prop->name, strlen(prop->name),
+                           prop->unique_id, DT_REG))
+                       goto out;
+
+               ctx->pos++;
+               prop = prop->next;
+       }
+
 out:
        mutex_unlock(&op_mutex);
        return 0;
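
openpromfs keeps explicit dir_emit() calls for "." and ".." because its ".." entry must report the PROM parent node's unique_id rather than the VFS parent inode; the other conversions in this section use dir_emit_dots() for that boilerplate. As a rough behavioural sketch (the real helper lives in include/linux/fs.h and is built from dir_emit_dot()/dir_emit_dotdot(); the code below is an approximation, not the kernel's implementation):

    #include <linux/fs.h>

    static bool example_emit_dots(struct file *file, struct dir_context *ctx)
    {
            if (ctx->pos == 0) {
                    if (!dir_emit(ctx, ".", 1, file_inode(file)->i_ino, DT_DIR))
                            return false;
                    ctx->pos = 1;
            }
            if (ctx->pos == 1) {
                    if (!dir_emit(ctx, "..", 2,
                                  parent_ino(file->f_path.dentry), DT_DIR))
                            return false;
                    ctx->pos = 2;
            }
            return true;    /* both dots delivered, or pos was already past them */
    }
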
index c3834dad09b3bce4dccec2180478d852ffddb70d..0016350ad95e13a646978f8d3f19fb1afa145424 100644 (file)
@@ -1681,11 +1681,11 @@ const struct dentry_operations pid_dentry_operations =
  * reported by readdir in sync with the inode numbers reported
  * by stat.
  */
-int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
+bool proc_fill_cache(struct file *file, struct dir_context *ctx,
        const char *name, int len,
        instantiate_t instantiate, struct task_struct *task, const void *ptr)
 {
-       struct dentry *child, *dir = filp->f_path.dentry;
+       struct dentry *child, *dir = file->f_path.dentry;
        struct inode *inode;
        struct qstr qname;
        ino_t ino = 0;
@@ -1720,7 +1720,7 @@ end_instantiate:
                ino = find_inode_number(dir, &qname);
        if (!ino)
                ino = 1;
-       return filldir(dirent, name, len, filp->f_pos, ino, type);
+       return dir_emit(ctx, name, len, ino, type);
 }
 
 #ifdef CONFIG_CHECKPOINT_RESTORE
@@ -1931,14 +1931,15 @@ static const struct inode_operations proc_map_files_inode_operations = {
 };
 
 static int
-proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
+proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
        struct vm_area_struct *vma;
        struct task_struct *task;
        struct mm_struct *mm;
-       ino_t ino;
+       unsigned long nr_files, pos, i;
+       struct flex_array *fa = NULL;
+       struct map_files_info info;
+       struct map_files_info *p;
        int ret;
 
        ret = -EPERM;
@@ -1946,7 +1947,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
                goto out;
 
        ret = -ENOENT;
-       task = get_proc_task(inode);
+       task = get_proc_task(file_inode(file));
        if (!task)
                goto out;
 
@@ -1955,91 +1956,73 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
                goto out_put_task;
 
        ret = 0;
-       switch (filp->f_pos) {
-       case 0:
-               ino = inode->i_ino;
-               if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0)
-                       goto out_put_task;
-               filp->f_pos++;
-       case 1:
-               ino = parent_ino(dentry);
-               if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
-                       goto out_put_task;
-               filp->f_pos++;
-       default:
-       {
-               unsigned long nr_files, pos, i;
-               struct flex_array *fa = NULL;
-               struct map_files_info info;
-               struct map_files_info *p;
-
-               mm = get_task_mm(task);
-               if (!mm)
-                       goto out_put_task;
-               down_read(&mm->mmap_sem);
+       if (!dir_emit_dots(file, ctx))
+               goto out_put_task;
 
-               nr_files = 0;
+       mm = get_task_mm(task);
+       if (!mm)
+               goto out_put_task;
+       down_read(&mm->mmap_sem);
 
-               /*
-                * We need two passes here:
-                *
-                *  1) Collect vmas of mapped files with mmap_sem taken
-                *  2) Release mmap_sem and instantiate entries
-                *
-                * otherwise we get lockdep complained, since filldir()
-                * routine might require mmap_sem taken in might_fault().
-                */
+       nr_files = 0;
 
-               for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
-                       if (vma->vm_file && ++pos > filp->f_pos)
-                               nr_files++;
-               }
+       /*
+        * We need two passes here:
+        *
+        *  1) Collect vmas of mapped files with mmap_sem taken
+        *  2) Release mmap_sem and instantiate entries
+        *
+        * otherwise we get lockdep complained, since filldir()
+        * routine might require mmap_sem taken in might_fault().
+        */
 
-               if (nr_files) {
-                       fa = flex_array_alloc(sizeof(info), nr_files,
-                                               GFP_KERNEL);
-                       if (!fa || flex_array_prealloc(fa, 0, nr_files,
-                                                       GFP_KERNEL)) {
-                               ret = -ENOMEM;
-                               if (fa)
-                                       flex_array_free(fa);
-                               up_read(&mm->mmap_sem);
-                               mmput(mm);
-                               goto out_put_task;
-                       }
-                       for (i = 0, vma = mm->mmap, pos = 2; vma;
-                                       vma = vma->vm_next) {
-                               if (!vma->vm_file)
-                                       continue;
-                               if (++pos <= filp->f_pos)
-                                       continue;
-
-                               info.mode = vma->vm_file->f_mode;
-                               info.len = snprintf(info.name,
-                                               sizeof(info.name), "%lx-%lx",
-                                               vma->vm_start, vma->vm_end);
-                               if (flex_array_put(fa, i++, &info, GFP_KERNEL))
-                                       BUG();
-                       }
+       for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
+               if (vma->vm_file && ++pos > ctx->pos)
+                       nr_files++;
+       }
+
+       if (nr_files) {
+               fa = flex_array_alloc(sizeof(info), nr_files,
+                                       GFP_KERNEL);
+               if (!fa || flex_array_prealloc(fa, 0, nr_files,
+                                               GFP_KERNEL)) {
+                       ret = -ENOMEM;
+                       if (fa)
+                               flex_array_free(fa);
+                       up_read(&mm->mmap_sem);
+                       mmput(mm);
+                       goto out_put_task;
                }
-               up_read(&mm->mmap_sem);
-
-               for (i = 0; i < nr_files; i++) {
-                       p = flex_array_get(fa, i);
-                       ret = proc_fill_cache(filp, dirent, filldir,
-                                             p->name, p->len,
-                                             proc_map_files_instantiate,
-                                             task,
-                                             (void *)(unsigned long)p->mode);
-                       if (ret)
-                               break;
-                       filp->f_pos++;
+               for (i = 0, vma = mm->mmap, pos = 2; vma;
+                               vma = vma->vm_next) {
+                       if (!vma->vm_file)
+                               continue;
+                       if (++pos <= ctx->pos)
+                               continue;
+
+                       info.mode = vma->vm_file->f_mode;
+                       info.len = snprintf(info.name,
+                                       sizeof(info.name), "%lx-%lx",
+                                       vma->vm_start, vma->vm_end);
+                       if (flex_array_put(fa, i++, &info, GFP_KERNEL))
+                               BUG();
                }
-               if (fa)
-                       flex_array_free(fa);
-               mmput(mm);
        }
+       up_read(&mm->mmap_sem);
+
+       for (i = 0; i < nr_files; i++) {
+               p = flex_array_get(fa, i);
+               if (!proc_fill_cache(file, ctx,
+                                     p->name, p->len,
+                                     proc_map_files_instantiate,
+                                     task,
+                                     (void *)(unsigned long)p->mode))
+                       break;
+               ctx->pos++;
        }
+       if (fa)
+               flex_array_free(fa);
+       mmput(mm);
 
 out_put_task:
        put_task_struct(task);
@@ -2049,7 +2032,7 @@ out:
 
 static const struct file_operations proc_map_files_operations = {
        .read           = generic_read_dir,
-       .readdir        = proc_map_files_readdir,
+       .iterate        = proc_map_files_readdir,
        .llseek         = default_llseek,
 };
 
@@ -2217,67 +2200,30 @@ out_no_task:
        return error;
 }
 
-static int proc_pident_fill_cache(struct file *filp, void *dirent,
-       filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
-{
-       return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
-                               proc_pident_instantiate, task, p);
-}
-
-static int proc_pident_readdir(struct file *filp,
-               void *dirent, filldir_t filldir,
+static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
                const struct pid_entry *ents, unsigned int nents)
 {
-       int i;
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
-       struct task_struct *task = get_proc_task(inode);
-       const struct pid_entry *p, *last;
-       ino_t ino;
-       int ret;
+       struct task_struct *task = get_proc_task(file_inode(file));
+       const struct pid_entry *p;
 
-       ret = -ENOENT;
        if (!task)
-               goto out_no_task;
+               return -ENOENT;
 
-       ret = 0;
-       i = filp->f_pos;
-       switch (i) {
-       case 0:
-               ino = inode->i_ino;
-               if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
-                       goto out;
-               i++;
-               filp->f_pos++;
-               /* fall through */
-       case 1:
-               ino = parent_ino(dentry);
-               if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
-                       goto out;
-               i++;
-               filp->f_pos++;
-               /* fall through */
-       default:
-               i -= 2;
-               if (i >= nents) {
-                       ret = 1;
-                       goto out;
-               }
-               p = ents + i;
-               last = &ents[nents - 1];
-               while (p <= last) {
-                       if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0)
-                               goto out;
-                       filp->f_pos++;
-                       p++;
-               }
-       }
+       if (!dir_emit_dots(file, ctx))
+               goto out;
+
+       if (ctx->pos >= nents + 2)
+               goto out;
 
-       ret = 1;
+       for (p = ents + (ctx->pos - 2); p <= ents + nents - 1; p++) {
+               if (!proc_fill_cache(file, ctx, p->name, p->len,
+                               proc_pident_instantiate, task, p))
+                       break;
+               ctx->pos++;
+       }
 out:
        put_task_struct(task);
-out_no_task:
-       return ret;
+       return 0;
 }
 
 #ifdef CONFIG_SECURITY
@@ -2362,16 +2308,15 @@ static const struct pid_entry attr_dir_stuff[] = {
        REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
 };
 
-static int proc_attr_dir_readdir(struct file * filp,
-                            void * dirent, filldir_t filldir)
+static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx)
 {
-       return proc_pident_readdir(filp,dirent,filldir,
-                                  attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff));
+       return proc_pident_readdir(file, ctx, 
+                                  attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
 }
 
 static const struct file_operations proc_attr_dir_operations = {
        .read           = generic_read_dir,
-       .readdir        = proc_attr_dir_readdir,
+       .iterate        = proc_attr_dir_readdir,
        .llseek         = default_llseek,
 };
 
@@ -2725,16 +2670,15 @@ static const struct pid_entry tgid_base_stuff[] = {
 #endif
 };
 
-static int proc_tgid_base_readdir(struct file * filp,
-                            void * dirent, filldir_t filldir)
+static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
 {
-       return proc_pident_readdir(filp,dirent,filldir,
-                                  tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
+       return proc_pident_readdir(file, ctx,
+                                  tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
 }
 
 static const struct file_operations proc_tgid_base_operations = {
        .read           = generic_read_dir,
-       .readdir        = proc_tgid_base_readdir,
+       .iterate        = proc_tgid_base_readdir,
        .llseek         = default_llseek,
 };
 
@@ -2936,58 +2880,42 @@ retry:
 
 #define TGID_OFFSET (FIRST_PROCESS_ENTRY + 1)
 
-static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
-       struct tgid_iter iter)
-{
-       char name[PROC_NUMBUF];
-       int len = snprintf(name, sizeof(name), "%d", iter.tgid);
-       return proc_fill_cache(filp, dirent, filldir, name, len,
-                               proc_pid_instantiate, iter.task, NULL);
-}
-
-static int fake_filldir(void *buf, const char *name, int namelen,
-                       loff_t offset, u64 ino, unsigned d_type)
-{
-       return 0;
-}
-
 /* for the /proc/ directory itself, after non-process stuff has been done */
-int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+int proc_pid_readdir(struct file *file, struct dir_context *ctx)
 {
        struct tgid_iter iter;
        struct pid_namespace *ns;
-       filldir_t __filldir;
-       loff_t pos = filp->f_pos;
+       loff_t pos = ctx->pos;
 
        if (pos >= PID_MAX_LIMIT + TGID_OFFSET)
-               goto out;
+               return 0;
 
        if (pos == TGID_OFFSET - 1) {
-               if (proc_fill_cache(filp, dirent, filldir, "self", 4,
-                                       NULL, NULL, NULL) < 0)
-                       goto out;
+               if (!proc_fill_cache(file, ctx, "self", 4, NULL, NULL, NULL))
+                       return 0;
                iter.tgid = 0;
        } else {
                iter.tgid = pos - TGID_OFFSET;
        }
        iter.task = NULL;
-       ns = filp->f_dentry->d_sb->s_fs_info;
+       ns = file->f_dentry->d_sb->s_fs_info;
        for (iter = next_tgid(ns, iter);
             iter.task;
             iter.tgid += 1, iter = next_tgid(ns, iter)) {
-               if (has_pid_permissions(ns, iter.task, 2))
-                       __filldir = filldir;
-               else
-                       __filldir = fake_filldir;
+               char name[PROC_NUMBUF];
+               int len;
+               if (!has_pid_permissions(ns, iter.task, 2))
+                       continue;
 
-               filp->f_pos = iter.tgid + TGID_OFFSET;
-               if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
+               len = snprintf(name, sizeof(name), "%d", iter.tgid);
+               ctx->pos = iter.tgid + TGID_OFFSET;
+               if (!proc_fill_cache(file, ctx, name, len,
+                                    proc_pid_instantiate, iter.task, NULL)) {
                        put_task_struct(iter.task);
-                       goto out;
+                       return 0;
                }
        }
-       filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
-out:
+       ctx->pos = PID_MAX_LIMIT + TGID_OFFSET;
        return 0;
 }
 
@@ -3075,11 +3003,10 @@ static const struct pid_entry tid_base_stuff[] = {
 #endif
 };
 
-static int proc_tid_base_readdir(struct file * filp,
-                            void * dirent, filldir_t filldir)
+static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
 {
-       return proc_pident_readdir(filp,dirent,filldir,
-                                  tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
+       return proc_pident_readdir(file, ctx,
+                                  tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
 }
 
 static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
@@ -3090,7 +3017,7 @@ static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *den
 
 static const struct file_operations proc_tid_base_operations = {
        .read           = generic_read_dir,
-       .readdir        = proc_tid_base_readdir,
+       .iterate        = proc_tid_base_readdir,
        .llseek         = default_llseek,
 };
 
@@ -3231,30 +3158,16 @@ static struct task_struct *next_tid(struct task_struct *start)
        return pos;
 }
 
-static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
-       struct task_struct *task, int tid)
-{
-       char name[PROC_NUMBUF];
-       int len = snprintf(name, sizeof(name), "%d", tid);
-       return proc_fill_cache(filp, dirent, filldir, name, len,
-                               proc_task_instantiate, task, NULL);
-}
-
 /* for the /proc/TGID/task/ directories */
-static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
+static int proc_task_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
        struct task_struct *leader = NULL;
-       struct task_struct *task;
-       int retval = -ENOENT;
-       ino_t ino;
-       int tid;
+       struct task_struct *task = get_proc_task(file_inode(file));
        struct pid_namespace *ns;
+       int tid;
 
-       task = get_proc_task(inode);
        if (!task)
-               goto out_no_task;
+               return -ENOENT;
        rcu_read_lock();
        if (pid_alive(task)) {
                leader = task->group_leader;
@@ -3263,46 +3176,36 @@ static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldi
        rcu_read_unlock();
        put_task_struct(task);
        if (!leader)
-               goto out_no_task;
-       retval = 0;
+               return -ENOENT;
 
-       switch ((unsigned long)filp->f_pos) {
-       case 0:
-               ino = inode->i_ino;
-               if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) < 0)
-                       goto out;
-               filp->f_pos++;
-               /* fall through */
-       case 1:
-               ino = parent_ino(dentry);
-               if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) < 0)
-                       goto out;
-               filp->f_pos++;
-               /* fall through */
-       }
+       if (!dir_emit_dots(file, ctx))
+               goto out;
 
        /* f_version caches the tgid value that the last readdir call couldn't
         * return. lseek aka telldir automagically resets f_version to 0.
         */
-       ns = filp->f_dentry->d_sb->s_fs_info;
-       tid = (int)filp->f_version;
-       filp->f_version = 0;
-       for (task = first_tid(leader, tid, filp->f_pos - 2, ns);
+       ns = file->f_dentry->d_sb->s_fs_info;
+       tid = (int)file->f_version;
+       file->f_version = 0;
+       for (task = first_tid(leader, tid, ctx->pos - 2, ns);
             task;
-            task = next_tid(task), filp->f_pos++) {
+            task = next_tid(task), ctx->pos++) {
+               char name[PROC_NUMBUF];
+               int len;
                tid = task_pid_nr_ns(task, ns);
-               if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
+               len = snprintf(name, sizeof(name), "%d", tid);
+               if (!proc_fill_cache(file, ctx, name, len,
+                               proc_task_instantiate, task, NULL)) {
                        /* returning this tgid failed, save it as the first
                         * pid for the next readdir call */
-                       filp->f_version = (u64)tid;
+                       file->f_version = (u64)tid;
                        put_task_struct(task);
                        break;
                }
        }
 out:
        put_task_struct(leader);
-out_no_task:
-       return retval;
+       return 0;
 }
 
 static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
@@ -3328,6 +3231,6 @@ static const struct inode_operations proc_task_inode_operations = {
 
 static const struct file_operations proc_task_operations = {
        .read           = generic_read_dir,
-       .readdir        = proc_task_readdir,
+       .iterate        = proc_task_readdir,
        .llseek         = default_llseek,
 };
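
Several of the fs/proc/base.c iterators above (proc_pident_readdir(), and later proc_ns_dir_readdir()) reduce to one pattern: emit the dots, bound-check ctx->pos because lseek() can set it to anything, then walk a fixed table starting at ctx->pos - 2 and stop as soon as the emit helper reports a full buffer. A self-contained sketch of that pattern; the names, modes and inode number below are made up, not taken from the patch.

    #include <linux/kernel.h>
    #include <linux/fs.h>
    #include <linux/string.h>

    static const struct {
            const char *name;
            umode_t mode;
    } example_table[] = {
            { "status",  S_IFREG | 0444 },
            { "cmdline", S_IFREG | 0444 },
    };

    static int example_table_iterate(struct file *file, struct dir_context *ctx)
    {
            size_t i;

            if (!dir_emit_dots(file, ctx))
                    return 0;
            if (ctx->pos - 2 >= ARRAY_SIZE(example_table))
                    return 0;               /* lseek() may have pushed pos past the table */

            for (i = ctx->pos - 2; i < ARRAY_SIZE(example_table); i++) {
                    /* mode >> 12 turns the S_IFMT bits into the matching DT_* value */
                    if (!dir_emit(ctx, example_table[i].name,
                                  strlen(example_table[i].name),
                                  1 /* made-up inode number */,
                                  example_table[i].mode >> 12))
                            break;
                    ctx->pos++;
            }
            return 0;
    }
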
index d7a4a28ef63024c66ef862594701d1e389e77a47..1441f143c43b6d187f5d6f2b181118148d2501e5 100644 (file)
@@ -219,74 +219,58 @@ out_no_task:
        return result;
 }
 
-static int proc_readfd_common(struct file * filp, void * dirent,
-                             filldir_t filldir, instantiate_t instantiate)
+static int proc_readfd_common(struct file *file, struct dir_context *ctx,
+                             instantiate_t instantiate)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
-       struct task_struct *p = get_proc_task(inode);
+       struct task_struct *p = get_proc_task(file_inode(file));
        struct files_struct *files;
-       unsigned int fd, ino;
-       int retval;
+       unsigned int fd;
 
-       retval = -ENOENT;
        if (!p)
-               goto out_no_task;
-       retval = 0;
-
-       fd = filp->f_pos;
-       switch (fd) {
-               case 0:
-                       if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
-                               goto out;
-                       filp->f_pos++;
-               case 1:
-                       ino = parent_ino(dentry);
-                       if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
-                               goto out;
-                       filp->f_pos++;
-               default:
-                       files = get_files_struct(p);
-                       if (!files)
-                               goto out;
-                       rcu_read_lock();
-                       for (fd = filp->f_pos - 2;
-                            fd < files_fdtable(files)->max_fds;
-                            fd++, filp->f_pos++) {
-                               char name[PROC_NUMBUF];
-                               int len;
-                               int rv;
-
-                               if (!fcheck_files(files, fd))
-                                       continue;
-                               rcu_read_unlock();
+               return -ENOENT;
 
-                               len = snprintf(name, sizeof(name), "%d", fd);
-                               rv = proc_fill_cache(filp, dirent, filldir,
-                                                    name, len, instantiate, p,
-                                                    (void *)(unsigned long)fd);
-                               if (rv < 0)
-                                       goto out_fd_loop;
-                               rcu_read_lock();
-                       }
-                       rcu_read_unlock();
-out_fd_loop:
-                       put_files_struct(files);
+       if (!dir_emit_dots(file, ctx))
+               goto out;
+       files = get_files_struct(p);
+       if (!files)
+               goto out;
+
+       rcu_read_lock();
+       for (fd = ctx->pos - 2;
+            fd < files_fdtable(files)->max_fds;
+            fd++, ctx->pos++) {
+               char name[PROC_NUMBUF];
+               int len;
+
+               if (!fcheck_files(files, fd))
+                       continue;
+               rcu_read_unlock();
+
+               len = snprintf(name, sizeof(name), "%d", fd);
+               if (!proc_fill_cache(file, ctx,
+                                    name, len, instantiate, p,
+                                    (void *)(unsigned long)fd))
+                       goto out_fd_loop;
+               rcu_read_lock();
        }
+       rcu_read_unlock();
+out_fd_loop:
+       put_files_struct(files);
 out:
        put_task_struct(p);
-out_no_task:
-       return retval;
+       return 0;
 }
 
-static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir)
+static int proc_readfd(struct file *file, struct dir_context *ctx)
 {
-       return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate);
+       return proc_readfd_common(file, ctx, proc_fd_instantiate);
 }
 
 const struct file_operations proc_fd_operations = {
        .read           = generic_read_dir,
-       .readdir        = proc_readfd,
+       .iterate        = proc_readfd,
        .llseek         = default_llseek,
 };
 
@@ -351,9 +335,9 @@ proc_lookupfdinfo(struct inode *dir, struct dentry *dentry, unsigned int flags)
        return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
 }
 
-static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
+static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
 {
-       return proc_readfd_common(filp, dirent, filldir,
+       return proc_readfd_common(file, ctx,
                                  proc_fdinfo_instantiate);
 }
 
@@ -364,6 +348,6 @@ const struct inode_operations proc_fdinfo_inode_operations = {
 
 const struct file_operations proc_fdinfo_operations = {
        .read           = generic_read_dir,
-       .readdir        = proc_readfdinfo,
+       .iterate        = proc_readfdinfo,
        .llseek         = default_llseek,
 };
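
The fd directory above uses slightly different bookkeeping: ctx->pos - 2 is the slot number, every slot consumes exactly one position whether or not it is occupied, and the visible name is just that number printed in decimal. A hedged sketch of the scheme; example_slot_busy(), the table size and the inode numbers are stand-ins, not from the patch.

    #include <linux/fs.h>
    #include <linux/kernel.h>

    #define EXAMPLE_NR_SLOTS        256     /* arbitrary, for illustration */

    static bool example_slot_busy(unsigned int slot)
    {
            return slot % 2 == 0;           /* arbitrary stand-in for fcheck_files() */
    }

    static int example_numbered_iterate(struct file *file, struct dir_context *ctx)
    {
            unsigned int slot;

            if (!dir_emit_dots(file, ctx))
                    return 0;

            for (slot = ctx->pos - 2; slot < EXAMPLE_NR_SLOTS; slot++, ctx->pos++) {
                    char name[12];
                    int len;

                    if (!example_slot_busy(slot))
                            continue;       /* a hole still consumes one position */

                    len = snprintf(name, sizeof(name), "%u", slot);
                    if (!dir_emit(ctx, name, len, slot + 1 /* made-up ino */, DT_REG))
                            return 0;       /* buffer full; do not step past this slot */
            }
            return 0;
    }
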
index a2596afffae6dad430f8cb2727628e162c567876..94441a407337bb02fb77e89fe93dfbbed169f827 100644 (file)
@@ -233,76 +233,52 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
  * value of the readdir() call, as long as it's non-negative
  * for success..
  */
-int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
-               filldir_t filldir)
+int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
+                   struct dir_context *ctx)
 {
-       unsigned int ino;
        int i;
-       struct inode *inode = file_inode(filp);
-       int ret = 0;
-
-       ino = inode->i_ino;
-       i = filp->f_pos;
-       switch (i) {
-               case 0:
-                       if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
-                               goto out;
-                       i++;
-                       filp->f_pos++;
-                       /* fall through */
-               case 1:
-                       if (filldir(dirent, "..", 2, i,
-                                   parent_ino(filp->f_path.dentry),
-                                   DT_DIR) < 0)
-                               goto out;
-                       i++;
-                       filp->f_pos++;
-                       /* fall through */
-               default:
-                       spin_lock(&proc_subdir_lock);
-                       de = de->subdir;
-                       i -= 2;
-                       for (;;) {
-                               if (!de) {
-                                       ret = 1;
-                                       spin_unlock(&proc_subdir_lock);
-                                       goto out;
-                               }
-                               if (!i)
-                                       break;
-                               de = de->next;
-                               i--;
-                       }
 
-                       do {
-                               struct proc_dir_entry *next;
-
-                               /* filldir passes info to user space */
-                               pde_get(de);
-                               spin_unlock(&proc_subdir_lock);
-                               if (filldir(dirent, de->name, de->namelen, filp->f_pos,
-                                           de->low_ino, de->mode >> 12) < 0) {
-                                       pde_put(de);
-                                       goto out;
-                               }
-                               spin_lock(&proc_subdir_lock);
-                               filp->f_pos++;
-                               next = de->next;
-                               pde_put(de);
-                               de = next;
-                       } while (de);
+       if (!dir_emit_dots(file, ctx))
+               return 0;
+
+       spin_lock(&proc_subdir_lock);
+       de = de->subdir;
+       i = ctx->pos - 2;
+       for (;;) {
+               if (!de) {
                        spin_unlock(&proc_subdir_lock);
+                       return 0;
+               }
+               if (!i)
+                       break;
+               de = de->next;
+               i--;
        }
-       ret = 1;
-out:
-       return ret;     
+
+       do {
+               struct proc_dir_entry *next;
+               pde_get(de);
+               spin_unlock(&proc_subdir_lock);
+               if (!dir_emit(ctx, de->name, de->namelen,
+                           de->low_ino, de->mode >> 12)) {
+                       pde_put(de);
+                       return 0;
+               }
+               spin_lock(&proc_subdir_lock);
+               ctx->pos++;
+               next = de->next;
+               pde_put(de);
+               de = next;
+       } while (de);
+       spin_unlock(&proc_subdir_lock);
+       return 0;
 }
 
-int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
+int proc_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
 
-       return proc_readdir_de(PDE(inode), filp, dirent, filldir);
+       return proc_readdir_de(PDE(inode), file, ctx);
 }
 
 /*
@@ -313,7 +289,7 @@ int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
 static const struct file_operations proc_dir_operations = {
        .llseek                 = generic_file_llseek,
        .read                   = generic_read_dir,
-       .readdir                = proc_readdir,
+       .iterate                = proc_readdir,
 };
 
 /*
index d600fb098b6ad3ad51d227709d43161d8a9d0abc..4eae2e149f31c05bb2d802c05f64bf91afdd4d29 100644 (file)
@@ -165,14 +165,14 @@ extern int proc_setattr(struct dentry *, struct iattr *);
 extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *);
 extern int pid_revalidate(struct dentry *, unsigned int);
 extern int pid_delete_dentry(const struct dentry *);
-extern int proc_pid_readdir(struct file *, void *, filldir_t);
+extern int proc_pid_readdir(struct file *, struct dir_context *);
 extern struct dentry *proc_pid_lookup(struct inode *, struct dentry *, unsigned int);
 extern loff_t mem_lseek(struct file *, loff_t, int);
 
 /* Lookups */
 typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
                                     struct task_struct *, const void *);
-extern int proc_fill_cache(struct file *, void *, filldir_t, const char *, int,
+extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, int,
                           instantiate_t, struct task_struct *, const void *);
 
 /*
@@ -183,8 +183,8 @@ extern spinlock_t proc_subdir_lock;
 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
                                     struct dentry *);
-extern int proc_readdir(struct file *, void *, filldir_t);
-extern int proc_readdir_de(struct proc_dir_entry *, struct file *, void *, filldir_t);
+extern int proc_readdir(struct file *, struct dir_context *);
+extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
 
 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
 {
index 54bdc6701e9fd785ddc14c277a48ff455683d773..f6abbbbfad8a765d115013ae65b07632b7e15258 100644 (file)
@@ -213,74 +213,36 @@ out:
        return error;
 }
 
-static int proc_ns_fill_cache(struct file *filp, void *dirent,
-       filldir_t filldir, struct task_struct *task,
-       const struct proc_ns_operations *ops)
+static int proc_ns_dir_readdir(struct file *file, struct dir_context *ctx)
 {
-       return proc_fill_cache(filp, dirent, filldir,
-                               ops->name, strlen(ops->name),
-                               proc_ns_instantiate, task, ops);
-}
-
-static int proc_ns_dir_readdir(struct file *filp, void *dirent,
-                               filldir_t filldir)
-{
-       int i;
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
-       struct task_struct *task = get_proc_task(inode);
+       struct task_struct *task = get_proc_task(file_inode(file));
        const struct proc_ns_operations **entry, **last;
-       ino_t ino;
-       int ret;
 
-       ret = -ENOENT;
        if (!task)
-               goto out_no_task;
+               return -ENOENT;
 
-       ret = 0;
-       i = filp->f_pos;
-       switch (i) {
-       case 0:
-               ino = inode->i_ino;
-               if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
-                       goto out;
-               i++;
-               filp->f_pos++;
-               /* fall through */
-       case 1:
-               ino = parent_ino(dentry);
-               if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
-                       goto out;
-               i++;
-               filp->f_pos++;
-               /* fall through */
-       default:
-               i -= 2;
-               if (i >= ARRAY_SIZE(ns_entries)) {
-                       ret = 1;
-                       goto out;
-               }
-               entry = ns_entries + i;
-               last = &ns_entries[ARRAY_SIZE(ns_entries) - 1];
-               while (entry <= last) {
-                       if (proc_ns_fill_cache(filp, dirent, filldir,
-                                               task, *entry) < 0)
-                               goto out;
-                       filp->f_pos++;
-                       entry++;
-               }
+       if (!dir_emit_dots(file, ctx))
+               goto out;
+       if (ctx->pos >= 2 + ARRAY_SIZE(ns_entries))
+               goto out;
+       entry = ns_entries + (ctx->pos - 2);
+       last = &ns_entries[ARRAY_SIZE(ns_entries) - 1];
+       while (entry <= last) {
+               const struct proc_ns_operations *ops = *entry;
+               if (!proc_fill_cache(file, ctx, ops->name, strlen(ops->name),
+                                    proc_ns_instantiate, task, ops))
+                       break;
+               ctx->pos++;
+               entry++;
        }
-
-       ret = 1;
 out:
        put_task_struct(task);
-out_no_task:
-       return ret;
+       return 0;
 }
 
 const struct file_operations proc_ns_dir_operations = {
        .read           = generic_read_dir,
-       .readdir        = proc_ns_dir_readdir,
+       .iterate        = proc_ns_dir_readdir,
 };
 
 static struct dentry *proc_ns_dir_lookup(struct inode *dir,
index 986e83220d56e57524b533acb8e6a52a6cffe612..4677bb7dc7c29cda9d7eb5825b86044851dbca6c 100644 (file)
@@ -160,16 +160,15 @@ const struct inode_operations proc_net_inode_operations = {
        .getattr        = proc_tgid_net_getattr,
 };
 
-static int proc_tgid_net_readdir(struct file *filp, void *dirent,
-               filldir_t filldir)
+static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx)
 {
        int ret;
        struct net *net;
 
        ret = -EINVAL;
-       net = get_proc_task_net(file_inode(filp));
+       net = get_proc_task_net(file_inode(file));
        if (net != NULL) {
-               ret = proc_readdir_de(net->proc_net, filp, dirent, filldir);
+               ret = proc_readdir_de(net->proc_net, file, ctx);
                put_net(net);
        }
        return ret;
@@ -178,7 +177,7 @@ static int proc_tgid_net_readdir(struct file *filp, void *dirent,
 const struct file_operations proc_net_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = proc_tgid_net_readdir,
+       .iterate        = proc_tgid_net_readdir,
 };
 
 static __net_init int proc_net_ns_init(struct net *net)
index ac05f33a0dde360aebbcacd9d02df8a8b927797a..f3a570e7c2575a723b5ac4061c1dd7aca1cb80c8 100644 (file)
@@ -573,12 +573,12 @@ out:
        return ret;
 }
 
-static int proc_sys_fill_cache(struct file *filp, void *dirent,
-                               filldir_t filldir,
+static bool proc_sys_fill_cache(struct file *file,
+                               struct dir_context *ctx,
                                struct ctl_table_header *head,
                                struct ctl_table *table)
 {
-       struct dentry *child, *dir = filp->f_path.dentry;
+       struct dentry *child, *dir = file->f_path.dentry;
        struct inode *inode;
        struct qstr qname;
        ino_t ino = 0;
@@ -595,38 +595,38 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
                        inode = proc_sys_make_inode(dir->d_sb, head, table);
                        if (!inode) {
                                dput(child);
-                               return -ENOMEM;
+                               return false;
                        } else {
                                d_set_d_op(child, &proc_sys_dentry_operations);
                                d_add(child, inode);
                        }
                } else {
-                       return -ENOMEM;
+                       return false;
                }
        }
        inode = child->d_inode;
        ino  = inode->i_ino;
        type = inode->i_mode >> 12;
        dput(child);
-       return !!filldir(dirent, qname.name, qname.len, filp->f_pos, ino, type);
+       return dir_emit(ctx, qname.name, qname.len, ino, type);
 }
 
-static int proc_sys_link_fill_cache(struct file *filp, void *dirent,
-                                   filldir_t filldir,
+static bool proc_sys_link_fill_cache(struct file *file,
+                                   struct dir_context *ctx,
                                    struct ctl_table_header *head,
                                    struct ctl_table *table)
 {
-       int err, ret = 0;
+       bool ret = true;
        head = sysctl_head_grab(head);
 
        if (S_ISLNK(table->mode)) {
                /* It is not an error if we can not follow the link ignore it */
-               err = sysctl_follow_link(&head, &table, current->nsproxy);
+               int err = sysctl_follow_link(&head, &table, current->nsproxy);
                if (err)
                        goto out;
        }
 
-       ret = proc_sys_fill_cache(filp, dirent, filldir, head, table);
+       ret = proc_sys_fill_cache(file, ctx, head, table);
 out:
        sysctl_head_finish(head);
        return ret;
@@ -634,67 +634,50 @@ out:
 
 static int scan(struct ctl_table_header *head, ctl_table *table,
                unsigned long *pos, struct file *file,
-               void *dirent, filldir_t filldir)
+               struct dir_context *ctx)
 {
-       int res;
+       bool res;
 
-       if ((*pos)++ < file->f_pos)
-               return 0;
+       if ((*pos)++ < ctx->pos)
+               return true;
 
        if (unlikely(S_ISLNK(table->mode)))
-               res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
+               res = proc_sys_link_fill_cache(file, ctx, head, table);
        else
-               res = proc_sys_fill_cache(file, dirent, filldir, head, table);
+               res = proc_sys_fill_cache(file, ctx, head, table);
 
-       if (res == 0)
-               file->f_pos = *pos;
+       if (res)
+               ctx->pos = *pos;
 
        return res;
 }
 
-static int proc_sys_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
-       struct ctl_table_header *head = grab_header(inode);
+       struct ctl_table_header *head = grab_header(file_inode(file));
        struct ctl_table_header *h = NULL;
        struct ctl_table *entry;
        struct ctl_dir *ctl_dir;
        unsigned long pos;
-       int ret = -EINVAL;
 
        if (IS_ERR(head))
                return PTR_ERR(head);
 
        ctl_dir = container_of(head, struct ctl_dir, header);
 
-       ret = 0;
-       /* Avoid a switch here: arm builds fail with missing __cmpdi2 */
-       if (filp->f_pos == 0) {
-               if (filldir(dirent, ".", 1, filp->f_pos,
-                               inode->i_ino, DT_DIR) < 0)
-                       goto out;
-               filp->f_pos++;
-       }
-       if (filp->f_pos == 1) {
-               if (filldir(dirent, "..", 2, filp->f_pos,
-                               parent_ino(dentry), DT_DIR) < 0)
-                       goto out;
-               filp->f_pos++;
-       }
+       if (!dir_emit_dots(file, ctx))
+               return 0;
+
        pos = 2;
 
        for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) {
-               ret = scan(h, entry, &pos, filp, dirent, filldir);
-               if (ret) {
+               if (!scan(h, entry, &pos, file, ctx)) {
                        sysctl_head_finish(h);
                        break;
                }
        }
-       ret = 1;
-out:
        sysctl_head_finish(head);
-       return ret;
+       return 0;
 }
 
 static int proc_sys_permission(struct inode *inode, int mask)
@@ -769,7 +752,7 @@ static const struct file_operations proc_sys_file_operations = {
 
 static const struct file_operations proc_sys_dir_file_operations = {
        .read           = generic_read_dir,
-       .readdir        = proc_sys_readdir,
+       .iterate        = proc_sys_readdir,
        .llseek         = generic_file_llseek,
 };
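
The sysctl scan() conversion above settles on a small convention worth spelling out: the helpers now return bool, true meaning "keep scanning" and false meaning "the user buffer is full, stop", and ctx->pos is only advanced once an entry at or past the current position has actually been delivered. A compact sketch of that convention (names are illustrative):

    #include <linux/fs.h>
    #include <linux/string.h>

    static bool example_scan_one(struct dir_context *ctx, unsigned long *pos,
                                 const char *name, u64 ino, unsigned int type)
    {
            if ((*pos)++ < ctx->pos)
                    return true;            /* already delivered on an earlier call */

            if (!dir_emit(ctx, name, strlen(name), ino, type))
                    return false;           /* stop the walk; ctx->pos stays put */

            ctx->pos = *pos;                /* resume after this entry next time */
            return true;
    }
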
 
index 41a6ea93f486ff81b6b112821339a66f0b020324..229e366598daecd4e905e8f51f13efaf0a44e773 100644 (file)
@@ -202,21 +202,14 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
        return proc_pid_lookup(dir, dentry, flags);
 }
 
-static int proc_root_readdir(struct file * filp,
-       void * dirent, filldir_t filldir)
+static int proc_root_readdir(struct file *file, struct dir_context *ctx)
 {
-       unsigned int nr = filp->f_pos;
-       int ret;
-
-       if (nr < FIRST_PROCESS_ENTRY) {
-               int error = proc_readdir(filp, dirent, filldir);
-               if (error <= 0)
-                       return error;
-               filp->f_pos = FIRST_PROCESS_ENTRY;
+       if (ctx->pos < FIRST_PROCESS_ENTRY) {
+               proc_readdir(file, ctx);
+               ctx->pos = FIRST_PROCESS_ENTRY;
        }
 
-       ret = proc_pid_readdir(filp, dirent, filldir);
-       return ret;
+       return proc_pid_readdir(file, ctx);
 }
 
 /*
@@ -226,7 +219,7 @@ static int proc_root_readdir(struct file * filp,
  */
 static const struct file_operations proc_root_operations = {
        .read            = generic_read_dir,
-       .readdir         = proc_root_readdir,
+       .iterate         = proc_root_readdir,
        .llseek         = default_llseek,
 };
 
index 28ce014b3ceff09e8ad2a2d2cea8716802a1dc5b..b218f965817bf336625f64dc2d089d7e5f1ba9af 100644 (file)
@@ -14,9 +14,9 @@
 #include <linux/buffer_head.h>
 #include "qnx4.h"
 
-static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int qnx4_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        unsigned int offset;
        struct buffer_head *bh;
        struct qnx4_inode_entry *de;
@@ -26,48 +26,44 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
        int size;
 
        QNX4DEBUG((KERN_INFO "qnx4_readdir:i_size = %ld\n", (long) inode->i_size));
-       QNX4DEBUG((KERN_INFO "filp->f_pos         = %ld\n", (long) filp->f_pos));
+       QNX4DEBUG((KERN_INFO "pos                 = %ld\n", (long) ctx->pos));
 
-       while (filp->f_pos < inode->i_size) {
-               blknum = qnx4_block_map( inode, filp->f_pos >> QNX4_BLOCK_SIZE_BITS );
+       while (ctx->pos < inode->i_size) {
+               blknum = qnx4_block_map(inode, ctx->pos >> QNX4_BLOCK_SIZE_BITS);
                bh = sb_bread(inode->i_sb, blknum);
-               if(bh==NULL) {
+               if (bh == NULL) {
                        printk(KERN_ERR "qnx4_readdir: bread failed (%ld)\n", blknum);
-                       break;
+                       return 0;
                }
-               ix = (int)(filp->f_pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
-               while (ix < QNX4_INODES_PER_BLOCK) {
+               ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
+               for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
                        offset = ix * QNX4_DIR_ENTRY_SIZE;
                        de = (struct qnx4_inode_entry *) (bh->b_data + offset);
-                       size = strlen(de->di_fname);
-                       if (size) {
-                               if ( !( de->di_status & QNX4_FILE_LINK ) && size > QNX4_SHORT_NAME_MAX )
-                                       size = QNX4_SHORT_NAME_MAX;
-                               else if ( size > QNX4_NAME_MAX )
-                                       size = QNX4_NAME_MAX;
-
-                               if ( ( de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK) ) != 0 ) {
-                                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
-                                       if ( ( de->di_status & QNX4_FILE_LINK ) == 0 )
-                                               ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
-                                       else {
-                                               le  = (struct qnx4_link_info*)de;
-                                               ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
-                                                       QNX4_INODES_PER_BLOCK +
-                                                       le->dl_inode_ndx;
-                                       }
-                                       if (filldir(dirent, de->di_fname, size, filp->f_pos, ino, DT_UNKNOWN) < 0) {
-                                               brelse(bh);
-                                               goto out;
-                                       }
-                               }
+                       if (!de->di_fname[0])
+                               continue;
+                       if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
+                               continue;
+                       if (!(de->di_status & QNX4_FILE_LINK))
+                               size = QNX4_SHORT_NAME_MAX;
+                       else
+                               size = QNX4_NAME_MAX;
+                       size = strnlen(de->di_fname, size);
+                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
+                       if (!(de->di_status & QNX4_FILE_LINK))
+                               ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
+                       else {
+                               le  = (struct qnx4_link_info*)de;
+                               ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
+                                       QNX4_INODES_PER_BLOCK +
+                                       le->dl_inode_ndx;
+                       }
+                       if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) {
+                               brelse(bh);
+                               return 0;
                        }
-                       ix++;
-                       filp->f_pos += QNX4_DIR_ENTRY_SIZE;
                }
                brelse(bh);
        }
-out:
        return 0;
 }
 
@@ -75,7 +71,7 @@ const struct file_operations qnx4_dir_operations =
 {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = qnx4_readdir,
+       .iterate        = qnx4_readdir,
        .fsync          = generic_file_fsync,
 };
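
The qnx4 iterator above (and the qnx6 one below) encodes its cursor differently again: ctx->pos is simply the byte offset of the next fixed-size on-disk directory slot, so the block number and the in-block index can be recomputed from it at the top of every call, and every slot advances the position by one entry size. A tiny sketch of that mapping with made-up sizes:

    #include <linux/fs.h>

    #define EX_BLOCK_BITS   9       /* 512-byte blocks, illustrative */
    #define EX_DIRENT_BITS  6       /* 64-byte directory slots, illustrative */
    #define EX_PER_BLOCK    (1 << (EX_BLOCK_BITS - EX_DIRENT_BITS))

    static void example_decode_pos(loff_t pos, unsigned long *block, unsigned int *slot)
    {
            *block = pos >> EX_BLOCK_BITS;
            *slot  = (pos >> EX_DIRENT_BITS) % EX_PER_BLOCK;
            /* after a slot is handled (emitted or skipped): pos += 1 << EX_DIRENT_BITS */
    }
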
 
index afa6be6fc39759edc22abca58d3e21677576b0b4..15b7d92ed60d681a32b848a33a5c5c698d9a9fc5 100644 (file)
@@ -65,8 +65,8 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
 
 static int qnx6_dir_longfilename(struct inode *inode,
                        struct qnx6_long_dir_entry *de,
-                       void *dirent, loff_t pos,
-                       unsigned de_inode, filldir_t filldir)
+                       struct dir_context *ctx,
+                       unsigned de_inode)
 {
        struct qnx6_long_filename *lf;
        struct super_block *s = inode->i_sb;
@@ -104,8 +104,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
 
        QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s inode:%u\n",
                                        lf_size, lf->lf_fname, de_inode));
-       if (filldir(dirent, lf->lf_fname, lf_size, pos, de_inode,
-                       DT_UNKNOWN) < 0) {
+       if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) {
                qnx6_put_page(page);
                return 0;
        }
@@ -115,18 +114,19 @@ static int qnx6_dir_longfilename(struct inode *inode,
        return 1;
 }
 
-static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int qnx6_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *inode = file_inode(filp);
+       struct inode *inode = file_inode(file);
        struct super_block *s = inode->i_sb;
        struct qnx6_sb_info *sbi = QNX6_SB(s);
-       loff_t pos = filp->f_pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
+       loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
        unsigned long npages = dir_pages(inode);
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
        bool done = false;
 
-       if (filp->f_pos >= inode->i_size)
+       ctx->pos = pos;
+       if (ctx->pos >= inode->i_size)
                return 0;
 
        for ( ; !done && n < npages; n++, start = 0) {
@@ -137,11 +137,11 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
 
                if (IS_ERR(page)) {
                        printk(KERN_ERR "qnx6_readdir: read failed\n");
-                       filp->f_pos = (n + 1) << PAGE_CACHE_SHIFT;
+                       ctx->pos = (n + 1) << PAGE_CACHE_SHIFT;
                        return PTR_ERR(page);
                }
                de = ((struct qnx6_dir_entry *)page_address(page)) + start;
-               for (; i < limit; i++, de++, pos += QNX6_DIR_ENTRY_SIZE) {
+               for (; i < limit; i++, de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) {
                        int size = de->de_size;
                        u32 no_inode = fs32_to_cpu(sbi, de->de_inode);
 
@@ -154,8 +154,7 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                   structure / block */
                                if (!qnx6_dir_longfilename(inode,
                                        (struct qnx6_long_dir_entry *)de,
-                                       dirent, pos, no_inode,
-                                       filldir)) {
+                                       ctx, no_inode)) {
                                        done = true;
                                        break;
                                }
@@ -163,9 +162,8 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                QNX6DEBUG((KERN_INFO "qnx6_readdir:%.*s"
                                   " inode:%u\n", size, de->de_fname,
                                                        no_inode));
-                               if (filldir(dirent, de->de_fname, size,
-                                     pos, no_inode, DT_UNKNOWN)
-                                       < 0) {
+                               if (!dir_emit(ctx, de->de_fname, size,
+                                     no_inode, DT_UNKNOWN)) {
                                        done = true;
                                        break;
                                }
@@ -173,7 +171,6 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
                }
                qnx6_put_page(page);
        }
-       filp->f_pos = pos;
        return 0;
 }
 
@@ -282,7 +279,7 @@ found:
 const struct file_operations qnx6_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = qnx6_readdir,
+       .iterate        = qnx6_readdir,
        .fsync          = generic_file_fsync,
 };
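
The fs/read_write.c hunks that follow change do_sendfile() to stop handing &file->f_pos to rw_verify_area() and do_splice_direct(): both the input and the output position are copied into locals up front, do_splice_direct() gains an explicit output-position argument, and the files' f_pos values are updated only once the transfer has succeeded. A sketch of that pattern under stated assumptions; example_transfer() stands in for the actual do_splice_direct() call, and this is not the kernel function itself.

    #include <linux/fs.h>

    static ssize_t example_copy(struct file *in, struct file *out, size_t count,
                                ssize_t (*example_transfer)(struct file *, loff_t *,
                                                            struct file *, loff_t *,
                                                            size_t))
    {
            loff_t pos = in->f_pos;         /* snapshot, never &in->f_pos */
            loff_t out_pos = out->f_pos;
            ssize_t ret;

            ret = example_transfer(in, &pos, out, &out_pos, count);
            if (ret > 0) {
                    /* publish the new offsets only after a successful transfer */
                    in->f_pos = pos;
                    out->f_pos = out_pos;
            }
            return ret;
    }
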
 
index 03430008704e68fd74470e8dbb9fcb637dce3f97..2cefa417be349b0016ffa8606635db9e85751015 100644 (file)
@@ -1064,6 +1064,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        struct fd in, out;
        struct inode *in_inode, *out_inode;
        loff_t pos;
+       loff_t out_pos;
        ssize_t retval;
        int fl;
 
@@ -1077,12 +1078,14 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        if (!(in.file->f_mode & FMODE_READ))
                goto fput_in;
        retval = -ESPIPE;
-       if (!ppos)
-               ppos = &in.file->f_pos;
-       else
+       if (!ppos) {
+               pos = in.file->f_pos;
+       } else {
+               pos = *ppos;
                if (!(in.file->f_mode & FMODE_PREAD))
                        goto fput_in;
-       retval = rw_verify_area(READ, in.file, ppos, count);
+       }
+       retval = rw_verify_area(READ, in.file, &pos, count);
        if (retval < 0)
                goto fput_in;
        count = retval;
@@ -1099,7 +1102,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        retval = -EINVAL;
        in_inode = file_inode(in.file);
        out_inode = file_inode(out.file);
-       retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
+       out_pos = out.file->f_pos;
+       retval = rw_verify_area(WRITE, out.file, &out_pos, count);
        if (retval < 0)
                goto fput_out;
        count = retval;
@@ -1107,7 +1111,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        if (!max)
                max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
 
-       pos = *ppos;
        if (unlikely(pos + count > max)) {
                retval = -EOVERFLOW;
                if (pos >= max)
@@ -1126,18 +1129,23 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        if (in.file->f_flags & O_NONBLOCK)
                fl = SPLICE_F_NONBLOCK;
 #endif
-       retval = do_splice_direct(in.file, ppos, out.file, count, fl);
+       retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
 
        if (retval > 0) {
                add_rchar(current, retval);
                add_wchar(current, retval);
                fsnotify_access(in.file);
                fsnotify_modify(out.file);
+               out.file->f_pos = out_pos;
+               if (ppos)
+                       *ppos = pos;
+               else
+                       in.file->f_pos = pos;
        }
 
        inc_syscr(current);
        inc_syscw(current);
-       if (*ppos > max)
+       if (pos > max)
                retval = -EOVERFLOW;
 
 fput_out:
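
The do_sendfile() change above stops threading pointers at file->f_pos through the call chain: both positions are read into local variables, the transfer runs against those, and the results are written back to *ppos / f_pos only on success, so a failed transfer no longer moves either file offset. Below is a minimal userspace sketch of the same copy-then-commit discipline using plain pread()/pwrite() rather than the kernel's splice path; the function name and buffer size are invented for the example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Copy count bytes between two descriptors, publishing the new offsets to
 * the caller only if the whole operation succeeded. */
static ssize_t copy_range(int in_fd, int out_fd,
                          off_t *in_pos, off_t *out_pos, size_t count)
{
        char buf[4096];
        off_t ipos = *in_pos, opos = *out_pos;  /* local working copies */
        size_t done = 0;

        while (done < count) {
                size_t want = count - done;
                ssize_t r, w, off = 0;

                if (want > sizeof(buf))
                        want = sizeof(buf);
                r = pread(in_fd, buf, want, ipos);
                if (r < 0)
                        return -1;              /* caller's offsets stay untouched */
                if (r == 0)
                        break;                  /* EOF */
                while (off < r) {
                        w = pwrite(out_fd, buf + off, r - off, opos + off);
                        if (w < 0)
                                return -1;
                        off += w;
                }
                ipos += r;
                opos += r;
                done += r;
        }
        *in_pos = ipos;                         /* commit only after success */
        *out_pos = opos;
        return (ssize_t)done;
}

int main(void)
{
        int in = open("/dev/zero", O_RDONLY);
        int out = open("/dev/null", O_WRONLY);
        off_t ipos = 0, opos = 0;
        ssize_t n;

        if (in < 0 || out < 0)
                return 1;
        n = copy_range(in, out, &ipos, &opos, 8192);
        printf("copied %zd bytes, in_pos=%lld out_pos=%lld\n",
               n, (long long)ipos, (long long)opos);
        return 0;
}
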
index fee38e04fae4ab09440c05808e539f72133c6976..93d71e57431076f5773e335a3777e18ac3a90108 100644 (file)
 
 #include <asm/uaccess.h>
 
-int vfs_readdir(struct file *file, filldir_t filler, void *buf)
+int iterate_dir(struct file *file, struct dir_context *ctx)
 {
        struct inode *inode = file_inode(file);
        int res = -ENOTDIR;
-       if (!file->f_op || !file->f_op->readdir)
+       if (!file->f_op || !file->f_op->iterate)
                goto out;
 
        res = security_file_permission(file, MAY_READ);
@@ -37,15 +37,16 @@ int vfs_readdir(struct file *file, filldir_t filler, void *buf)
 
        res = -ENOENT;
        if (!IS_DEADDIR(inode)) {
-               res = file->f_op->readdir(file, buf, filler);
+               ctx->pos = file->f_pos;
+               res = file->f_op->iterate(file, ctx);
+               file->f_pos = ctx->pos;
                file_accessed(file);
        }
        mutex_unlock(&inode->i_mutex);
 out:
        return res;
 }
-
-EXPORT_SYMBOL(vfs_readdir);
+EXPORT_SYMBOL(iterate_dir);
 
 /*
  * Traditional linux readdir() handling..
@@ -66,6 +67,7 @@ struct old_linux_dirent {
 };
 
 struct readdir_callback {
+       struct dir_context ctx;
        struct old_linux_dirent __user * dirent;
        int result;
 };
@@ -73,7 +75,7 @@ struct readdir_callback {
 static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset,
                      u64 ino, unsigned int d_type)
 {
-       struct readdir_callback * buf = (struct readdir_callback *) __buf;
+       struct readdir_callback *buf = (struct readdir_callback *) __buf;
        struct old_linux_dirent __user * dirent;
        unsigned long d_ino;
 
@@ -107,15 +109,15 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
 {
        int error;
        struct fd f = fdget(fd);
-       struct readdir_callback buf;
+       struct readdir_callback buf = {
+               .ctx.actor = fillonedir,
+               .dirent = dirent
+       };
 
        if (!f.file)
                return -EBADF;
 
-       buf.result = 0;
-       buf.dirent = dirent;
-
-       error = vfs_readdir(f.file, fillonedir, &buf);
+       error = iterate_dir(f.file, &buf.ctx);
        if (buf.result)
                error = buf.result;
 
@@ -137,6 +139,7 @@ struct linux_dirent {
 };
 
 struct getdents_callback {
+       struct dir_context ctx;
        struct linux_dirent __user * current_dir;
        struct linux_dirent __user * previous;
        int count;
@@ -191,7 +194,11 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
 {
        struct fd f;
        struct linux_dirent __user * lastdirent;
-       struct getdents_callback buf;
+       struct getdents_callback buf = {
+               .ctx.actor = filldir,
+               .count = count,
+               .current_dir = dirent
+       };
        int error;
 
        if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -201,17 +208,12 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
        if (!f.file)
                return -EBADF;
 
-       buf.current_dir = dirent;
-       buf.previous = NULL;
-       buf.count = count;
-       buf.error = 0;
-
-       error = vfs_readdir(f.file, filldir, &buf);
+       error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               if (put_user(f.file->f_pos, &lastdirent->d_off))
+               if (put_user(buf.ctx.pos, &lastdirent->d_off))
                        error = -EFAULT;
                else
                        error = count - buf.count;
@@ -221,6 +223,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
 }
 
 struct getdents_callback64 {
+       struct dir_context ctx;
        struct linux_dirent64 __user * current_dir;
        struct linux_dirent64 __user * previous;
        int count;
@@ -271,7 +274,11 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
 {
        struct fd f;
        struct linux_dirent64 __user * lastdirent;
-       struct getdents_callback64 buf;
+       struct getdents_callback64 buf = {
+               .ctx.actor = filldir64,
+               .count = count,
+               .current_dir = dirent
+       };
        int error;
 
        if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -281,17 +288,12 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
        if (!f.file)
                return -EBADF;
 
-       buf.current_dir = dirent;
-       buf.previous = NULL;
-       buf.count = count;
-       buf.error = 0;
-
-       error = vfs_readdir(f.file, filldir64, &buf);
+       error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               typeof(lastdirent->d_off) d_off = f.file->f_pos;
+               typeof(lastdirent->d_off) d_off = buf.ctx.pos;
                if (__put_user(d_off, &lastdirent->d_off))
                        error = -EFAULT;
                else
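
In fs/readdir.c the old_readdir/getdents/getdents64 paths now embed a struct dir_context as the first member of their per-call buffers and hand &buf.ctx to iterate_dir(); because the context sits at offset zero, the pointer the actor receives and a pointer to the whole buffer are the same address, and the final position comes back in buf.ctx.pos rather than being read from f.file->f_pos. The standalone program below models just that aliasing; the struct and callback names imitate the ones in the diff but everything here is local to the example.

#include <stdio.h>

struct dir_context {
        int (*actor)(void *buf, const char *name);
        long pos;
};

struct readdir_callback {
        struct dir_context ctx;         /* first member: &buf aliases &buf.ctx */
        int result;
};

static int fillonedir(void *__buf, const char *name)
{
        struct readdir_callback *buf = __buf;   /* same trick the kernel actor uses */

        buf->result++;
        printf("saw %s\n", name);
        return 0;
}

static void fake_iterate_dir(struct dir_context *ctx)
{
        /* Stand-in for the VFS: call the actor, then advance the cursor. */
        ctx->actor(ctx, "example-entry");
        ctx->pos = 42;
}

int main(void)
{
        struct readdir_callback buf = {
                .ctx.actor = fillonedir,
                .result = 0,
        };

        fake_iterate_dir(&buf.ctx);
        printf("result=%d, final pos=%ld\n", buf.result, buf.ctx.pos);
        return 0;
}
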
index 6c2d136561cbd5db121285d97ee335fdf20db081..03e4ca5624d6057f90c8230a2c6d89cac0fcd96f 100644 (file)
 
 extern const struct reiserfs_key MIN_KEY;
 
-static int reiserfs_readdir(struct file *, void *, filldir_t);
+static int reiserfs_readdir(struct file *, struct dir_context *);
 static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end,
                              int datasync);
 
 const struct file_operations reiserfs_dir_operations = {
        .llseek = generic_file_llseek,
        .read = generic_read_dir,
-       .readdir = reiserfs_readdir,
+       .iterate = reiserfs_readdir,
        .fsync = reiserfs_dir_fsync,
        .unlocked_ioctl = reiserfs_ioctl,
 #ifdef CONFIG_COMPAT
@@ -50,18 +50,15 @@ static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end,
 
 #define store_ih(where,what) copy_item_head (where, what)
 
-static inline bool is_privroot_deh(struct dentry *dir,
-                                  struct reiserfs_de_head *deh)
+static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *deh)
 {
-       struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
-       return (dir == dir->d_parent && privroot->d_inode &&
+       struct dentry *privroot = REISERFS_SB(dir->i_sb)->priv_root;
+       return (privroot->d_inode &&
                deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
 }
 
-int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
-                          filldir_t filldir, loff_t *pos)
+int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
 {
-       struct inode *inode = dentry->d_inode;
        struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */
        INITIALIZE_PATH(path_to_entry);
        struct buffer_head *bh;
@@ -81,7 +78,7 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
 
        /* form key for search the next directory entry using f_pos field of
           file structure */
-       make_cpu_key(&pos_key, inode, *pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3);
+       make_cpu_key(&pos_key, inode, ctx->pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3);
        next_pos = cpu_key_k_offset(&pos_key);
 
        path_to_entry.reada = PATH_READA;
@@ -126,7 +123,6 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
                             entry_num++, deh++) {
                                int d_reclen;
                                char *d_name;
-                               off_t d_off;
                                ino_t d_ino;
 
                                if (!de_visible(deh))
@@ -155,11 +151,10 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
                                }
 
                                /* Ignore the .reiserfs_priv entry */
-                               if (is_privroot_deh(dentry, deh))
+                               if (is_privroot_deh(inode, deh))
                                        continue;
 
-                               d_off = deh_offset(deh);
-                               *pos = d_off;
+                               ctx->pos = deh_offset(deh);
                                d_ino = deh_objectid(deh);
                                if (d_reclen <= 32) {
                                        local_buf = small_buf;
@@ -187,9 +182,9 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
                                 * the write lock here for other waiters
                                 */
                                reiserfs_write_unlock(inode->i_sb);
-                               if (filldir
-                                   (dirent, local_buf, d_reclen, d_off, d_ino,
-                                    DT_UNKNOWN) < 0) {
+                               if (!dir_emit
+                                   (ctx, local_buf, d_reclen, d_ino,
+                                    DT_UNKNOWN)) {
                                        reiserfs_write_lock(inode->i_sb);
                                        if (local_buf != small_buf) {
                                                kfree(local_buf);
@@ -237,7 +232,7 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
        }                       /* while */
 
 end:
-       *pos = next_pos;
+       ctx->pos = next_pos;
        pathrelse(&path_to_entry);
        reiserfs_check_path(&path_to_entry);
 out:
@@ -245,10 +240,9 @@ out:
        return ret;
 }
 
-static int reiserfs_readdir(struct file *file, void *dirent, filldir_t filldir)
+static int reiserfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry *dentry = file->f_path.dentry;
-       return reiserfs_readdir_dentry(dentry, dirent, filldir, &file->f_pos);
+       return reiserfs_readdir_inode(file_inode(file), ctx);
 }
 
 /* compose directory item containing "." and ".." entries (entries are
index f844533792ee99d7c7191f070869d1462cea52ab..0048cc16a6a8c5e6d54ebd1d9dd16f29f1d310ea 100644 (file)
@@ -2975,16 +2975,19 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
 }
 
 /* clm -- taken from fs/buffer.c:block_invalidate_page */
-static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
+static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
+                                   unsigned int length)
 {
        struct buffer_head *head, *bh, *next;
        struct inode *inode = page->mapping->host;
        unsigned int curr_off = 0;
+       unsigned int stop = offset + length;
+       int partial_page = (offset || length < PAGE_CACHE_SIZE);
        int ret = 1;
 
        BUG_ON(!PageLocked(page));
 
-       if (offset == 0)
+       if (!partial_page)
                ClearPageChecked(page);
 
        if (!page_has_buffers(page))
@@ -2996,6 +2999,9 @@ static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;
 
+               if (next_off > stop)
+                       goto out;
+
                /*
                 * is this block fully invalidated?
                 */
@@ -3014,7 +3020,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
         * The get_block cached value has been unconditionally invalidated,
         * so real IO is not possible anymore.
         */
-       if (!offset && ret) {
+       if (!partial_page && ret) {
                ret = try_to_release_page(page, 0);
                /* maybe should BUG_ON(!ret); - neilb */
        }
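
The reiserfs_invalidatepage() hunk above reflects an interface change that appears again later in this diff for UBIFS and XFS: ->invalidatepage() now receives an (offset, length) byte range within the page rather than a single offset, so only a call that covers the whole page may tear down per-page state such as PageChecked. The arithmetic is small enough to show standalone; PAGE_SIZE below is just a stand-in for PAGE_CACHE_SIZE and the function is illustrative, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static void invalidate_range(unsigned int offset, unsigned int length)
{
        bool partial_page = offset || length < PAGE_SIZE;
        unsigned int stop = offset + length;    /* end of the invalidated range */

        printf("[%u, %u): %s invalidation\n",
               offset, stop, partial_page ? "partial-page" : "whole-page");
}

int main(void)
{
        invalidate_range(0, PAGE_SIZE);         /* whole page: state may be dropped */
        invalidate_range(0, 1024);              /* punch at the head of the page */
        invalidate_range(2048, 2048);           /* punch at the tail of the page */
        return 0;
}
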
index 157e474ab30347cde97360a4c94de73b5aff2164..3df5ce6c724d5bbafc27e655fc30870a5f35c6e1 100644 (file)
@@ -2709,7 +2709,7 @@ extern const struct inode_operations reiserfs_dir_inode_operations;
 extern const struct inode_operations reiserfs_symlink_inode_operations;
 extern const struct inode_operations reiserfs_special_inode_operations;
 extern const struct file_operations reiserfs_dir_operations;
-int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *);
+int reiserfs_readdir_inode(struct inode *, struct dir_context *);
 
 /* tail_conversion.c */
 int direct2indirect(struct reiserfs_transaction_handle *, struct inode *,
index 821bcf70e467432e14868b0c5bb00cb1b6940598..c69cdd749f09b98b702c103062262c38b92606e1 100644 (file)
@@ -171,6 +171,7 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags)
  * modifying extended attributes. This includes operations such as permissions
  * or ownership changes, object deletions, etc. */
 struct reiserfs_dentry_buf {
+       struct dir_context ctx;
        struct dentry *xadir;
        int count;
        struct dentry *dentries[8];
@@ -223,9 +224,8 @@ static int reiserfs_for_each_xattr(struct inode *inode,
 {
        struct dentry *dir;
        int i, err = 0;
-       loff_t pos = 0;
        struct reiserfs_dentry_buf buf = {
-               .count = 0,
+               .ctx.actor = fill_with_dentries,
        };
 
        /* Skip out, an xattr has no xattrs associated with it */
@@ -249,29 +249,27 @@ static int reiserfs_for_each_xattr(struct inode *inode,
        reiserfs_write_lock(inode->i_sb);
 
        buf.xadir = dir;
-       err = reiserfs_readdir_dentry(dir, &buf, fill_with_dentries, &pos);
-       while ((err == 0 || err == -ENOSPC) && buf.count) {
-               err = 0;
-
-               for (i = 0; i < buf.count && buf.dentries[i]; i++) {
-                       int lerr = 0;
+       while (1) {
+               err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
+               if (err)
+                       break;
+               if (!buf.count)
+                       break;
+               for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
                        struct dentry *dentry = buf.dentries[i];
 
-                       if (err == 0 && !S_ISDIR(dentry->d_inode->i_mode))
-                               lerr = action(dentry, data);
+                       if (!S_ISDIR(dentry->d_inode->i_mode))
+                               err = action(dentry, data);
 
                        dput(dentry);
                        buf.dentries[i] = NULL;
-                       err = lerr ?: err;
                }
+               if (err)
+                       break;
                buf.count = 0;
-               if (!err)
-                       err = reiserfs_readdir_dentry(dir, &buf,
-                                                     fill_with_dentries, &pos);
        }
        mutex_unlock(&dir->d_inode->i_mutex);
 
-       /* Clean up after a failed readdir */
        cleanup_dentry_buf(&buf);
 
        if (!err) {
@@ -800,6 +798,7 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name)
 }
 
 struct listxattr_buf {
+       struct dir_context ctx;
        size_t size;
        size_t pos;
        char *buf;
@@ -845,8 +844,8 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
 {
        struct dentry *dir;
        int err = 0;
-       loff_t pos = 0;
        struct listxattr_buf buf = {
+               .ctx.actor = listxattr_filler,
                .dentry = dentry,
                .buf = buffer,
                .size = buffer ? size : 0,
@@ -868,7 +867,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
        }
 
        mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
-       err = reiserfs_readdir_dentry(dir, &buf, listxattr_filler, &pos);
+       err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
        mutex_unlock(&dir->d_inode->i_mutex);
 
        if (!err)
index 15cbc41ee3653133c6e7001aec75e754582362e6..ff1d3d42e72accd5a0572686d2c5f226ed59c5c3 100644 (file)
@@ -145,19 +145,18 @@ static const struct address_space_operations romfs_aops = {
 /*
  * read the entries from a directory
  */
-static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int romfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct inode *i = file_inode(filp);
+       struct inode *i = file_inode(file);
        struct romfs_inode ri;
        unsigned long offset, maxoff;
        int j, ino, nextfh;
-       int stored = 0;
        char fsname[ROMFS_MAXFN];       /* XXX dynamic? */
        int ret;
 
        maxoff = romfs_maxsize(i->i_sb);
 
-       offset = filp->f_pos;
+       offset = ctx->pos;
        if (!offset) {
                offset = i->i_ino & ROMFH_MASK;
                ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE);
@@ -170,10 +169,10 @@ static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        for (;;) {
                if (!offset || offset >= maxoff) {
                        offset = maxoff;
-                       filp->f_pos = offset;
+                       ctx->pos = offset;
                        goto out;
                }
-               filp->f_pos = offset;
+               ctx->pos = offset;
 
                /* Fetch inode info */
                ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE);
@@ -194,16 +193,14 @@ static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                nextfh = be32_to_cpu(ri.next);
                if ((nextfh & ROMFH_TYPE) == ROMFH_HRD)
                        ino = be32_to_cpu(ri.spec);
-               if (filldir(dirent, fsname, j, offset, ino,
-                           romfs_dtype_table[nextfh & ROMFH_TYPE]) < 0)
+               if (!dir_emit(ctx, fsname, j, ino,
+                           romfs_dtype_table[nextfh & ROMFH_TYPE]))
                        goto out;
 
-               stored++;
                offset = nextfh & ROMFH_MASK;
        }
-
 out:
-       return stored;
+       return 0;
 }
 
 /*
@@ -281,7 +278,7 @@ error:
 
 static const struct file_operations romfs_dir_operations = {
        .read           = generic_read_dir,
-       .readdir        = romfs_readdir,
+       .iterate        = romfs_readdir,
        .llseek         = default_llseek,
 };
 
index e6b25598c8c413d66026f96e6d6d1b351e28b62c..d37431dd60a1009f224d4c1bdc65b4fff505d2db 100644 (file)
@@ -1274,7 +1274,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
 {
        struct file *file = sd->u.file;
 
-       return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
+       return do_splice_from(pipe, file, sd->opos, sd->total_len,
                              sd->flags);
 }
 
@@ -1283,6 +1283,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
  * @in:                file to splice from
  * @ppos:      input file offset
  * @out:       file to splice to
+ * @opos:      output file offset
  * @len:       number of bytes to splice
  * @flags:     splice modifier flags
  *
@@ -1294,7 +1295,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
  *
  */
 long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
-                     size_t len, unsigned int flags)
+                     loff_t *opos, size_t len, unsigned int flags)
 {
        struct splice_desc sd = {
                .len            = len,
@@ -1302,6 +1303,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
                .flags          = flags,
                .pos            = *ppos,
                .u.file         = out,
+               .opos           = opos,
        };
        long ret;
 
@@ -1325,7 +1327,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 {
        struct pipe_inode_info *ipipe;
        struct pipe_inode_info *opipe;
-       loff_t offset, *off;
+       loff_t offset;
        long ret;
 
        ipipe = get_pipe_info(in);
@@ -1356,13 +1358,15 @@ static long do_splice(struct file *in, loff_t __user *off_in,
                                return -EINVAL;
                        if (copy_from_user(&offset, off_out, sizeof(loff_t)))
                                return -EFAULT;
-                       off = &offset;
-               } else
-                       off = &out->f_pos;
+               } else {
+                       offset = out->f_pos;
+               }
 
-               ret = do_splice_from(ipipe, out, off, len, flags);
+               ret = do_splice_from(ipipe, out, &offset, len, flags);
 
-               if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
+               if (!off_out)
+                       out->f_pos = offset;
+               else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
                        ret = -EFAULT;
 
                return ret;
@@ -1376,13 +1380,15 @@ static long do_splice(struct file *in, loff_t __user *off_in,
                                return -EINVAL;
                        if (copy_from_user(&offset, off_in, sizeof(loff_t)))
                                return -EFAULT;
-                       off = &offset;
-               } else
-                       off = &in->f_pos;
+               } else {
+                       offset = in->f_pos;
+               }
 
-               ret = do_splice_to(in, off, opipe, len, flags);
+               ret = do_splice_to(in, &offset, opipe, len, flags);
 
-               if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
+               if (!off_in)
+                       in->f_pos = offset;
+               else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
                        ret = -EFAULT;
 
                return ret;
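
The fs/splice.c changes give do_splice_direct() an explicit output offset and make do_splice() work on a local copy, so f_pos is only updated when the caller passed a NULL offset pointer. Those are the same semantics splice(2) already promises to user space, and the short program below demonstrates them from that side; the temporary file path is arbitrary.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        int pfd[2];
        int fd = open("/tmp/splice-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
        loff_t off = 0;                         /* explicit input offset */
        ssize_t n;

        if (fd < 0 || pipe(pfd) < 0)
                return 1;
        if (write(fd, "hello world\n", 12) != 12)
                return 1;
        lseek(fd, 0, SEEK_SET);                 /* file position back to 0 */

        /* Non-NULL offset: splice reads at *off, advances it, leaves f_pos alone. */
        n = splice(fd, &off, pfd[1], NULL, 12, 0);
        printf("spliced %zd bytes, off=%lld, f_pos=%lld\n",
               n, (long long)off, (long long)lseek(fd, 0, SEEK_CUR));
        return 0;
}
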
index 57dc70ebbb1993eb446f4571f19b0b6f7ea1d5ae..f7f527bf8c10f9f5271e4788c7d1e3ec8e80fd4b 100644 (file)
@@ -100,7 +100,7 @@ static int get_dir_index_using_offset(struct super_block *sb,
 }
 
 
-static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
+static int squashfs_readdir(struct file *file, struct dir_context *ctx)
 {
        struct inode *inode = file_inode(file);
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
@@ -127,11 +127,11 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
         * It also means that the external f_pos is offset by 3 from the
         * on-disk directory f_pos.
         */
-       while (file->f_pos < 3) {
+       while (ctx->pos < 3) {
                char *name;
                int i_ino;
 
-               if (file->f_pos == 0) {
+               if (ctx->pos == 0) {
                        name = ".";
                        size = 1;
                        i_ino = inode->i_ino;
@@ -141,24 +141,18 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
                        i_ino = squashfs_i(inode)->parent;
                }
 
-               TRACE("Calling filldir(%p, %s, %d, %lld, %d, %d)\n",
-                               dirent, name, size, file->f_pos, i_ino,
-                               squashfs_filetype_table[1]);
-
-               if (filldir(dirent, name, size, file->f_pos, i_ino,
-                               squashfs_filetype_table[1]) < 0) {
-                               TRACE("Filldir returned less than 0\n");
+               if (!dir_emit(ctx, name, size, i_ino,
+                               squashfs_filetype_table[1]))
                        goto finish;
-               }
 
-               file->f_pos += size;
+               ctx->pos += size;
        }
 
        length = get_dir_index_using_offset(inode->i_sb, &block, &offset,
                                squashfs_i(inode)->dir_idx_start,
                                squashfs_i(inode)->dir_idx_offset,
                                squashfs_i(inode)->dir_idx_cnt,
-                               file->f_pos);
+                               ctx->pos);
 
        while (length < i_size_read(inode)) {
                /*
@@ -198,7 +192,7 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
 
                        length += sizeof(*dire) + size;
 
-                       if (file->f_pos >= length)
+                       if (ctx->pos >= length)
                                continue;
 
                        dire->name[size] = '\0';
@@ -206,22 +200,12 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
                                ((short) le16_to_cpu(dire->inode_number));
                        type = le16_to_cpu(dire->type);
 
-                       TRACE("Calling filldir(%p, %s, %d, %lld, %x:%x, %d, %d)"
-                                       "\n", dirent, dire->name, size,
-                                       file->f_pos,
-                                       le32_to_cpu(dirh.start_block),
-                                       le16_to_cpu(dire->offset),
-                                       inode_number,
-                                       squashfs_filetype_table[type]);
-
-                       if (filldir(dirent, dire->name, size, file->f_pos,
+                       if (!dir_emit(ctx, dire->name, size,
                                        inode_number,
-                                       squashfs_filetype_table[type]) < 0) {
-                               TRACE("Filldir returned less than 0\n");
+                                       squashfs_filetype_table[type]))
                                goto finish;
-                       }
 
-                       file->f_pos = length;
+                       ctx->pos = length;
                }
        }
 
@@ -238,6 +222,6 @@ failed_read:
 
 const struct file_operations squashfs_dir_ops = {
        .read = generic_read_dir,
-       .readdir = squashfs_readdir,
+       .iterate = squashfs_readdir,
        .llseek = default_llseek,
 };
index e8e0e71b29d54228bd3bd9fe95cd44e39422b2aa..4cfd742d260d6015b17973e68a6ed8d098b5159d 100644 (file)
@@ -998,68 +998,38 @@ static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns,
        return pos;
 }
 
-static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+static int sysfs_readdir(struct file *file, struct dir_context *ctx)
 {
-       struct dentry *dentry = filp->f_path.dentry;
+       struct dentry *dentry = file->f_path.dentry;
        struct sysfs_dirent * parent_sd = dentry->d_fsdata;
-       struct sysfs_dirent *pos = filp->private_data;
+       struct sysfs_dirent *pos = file->private_data;
        enum kobj_ns_type type;
        const void *ns;
-       ino_t ino;
-       loff_t off;
 
        type = sysfs_ns_type(parent_sd);
        ns = sysfs_info(dentry->d_sb)->ns[type];
 
-       if (filp->f_pos == 0) {
-               ino = parent_sd->s_ino;
-               if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
-                       filp->f_pos++;
-               else
-                       return 0;
-       }
-       if (filp->f_pos == 1) {
-               if (parent_sd->s_parent)
-                       ino = parent_sd->s_parent->s_ino;
-               else
-                       ino = parent_sd->s_ino;
-               if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
-                       filp->f_pos++;
-               else
-                       return 0;
-       }
+       if (!dir_emit_dots(file, ctx))
+               return 0;
        mutex_lock(&sysfs_mutex);
-       off = filp->f_pos;
-       for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
+       for (pos = sysfs_dir_pos(ns, parent_sd, ctx->pos, pos);
             pos;
-            pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) {
-               const char * name;
-               unsigned int type;
-               int len, ret;
-
-               name = pos->s_name;
-               len = strlen(name);
-               ino = pos->s_ino;
-               type = dt_type(pos);
-               off = filp->f_pos = pos->s_hash;
-               filp->private_data = sysfs_get(pos);
+            pos = sysfs_dir_next_pos(ns, parent_sd, ctx->pos, pos)) {
+               const char *name = pos->s_name;
+               unsigned int type = dt_type(pos);
+               int len = strlen(name);
+               ino_t ino = pos->s_ino;
+               ctx->pos = pos->s_hash;
+               file->private_data = sysfs_get(pos);
 
                mutex_unlock(&sysfs_mutex);
-               ret = filldir(dirent, name, len, off, ino, type);
+               if (!dir_emit(ctx, name, len, ino, type))
+                       return 0;
                mutex_lock(&sysfs_mutex);
-               if (ret < 0)
-                       break;
        }
        mutex_unlock(&sysfs_mutex);
-
-       /* don't reference last entry if its refcount is dropped */
-       if (!pos) {
-               filp->private_data = NULL;
-
-               /* EOF and not changed as 0 or 1 in read/write path */
-               if (off == filp->f_pos && off > 1)
-                       filp->f_pos = INT_MAX;
-       }
+       file->private_data = NULL;
+       ctx->pos = INT_MAX;
        return 0;
 }
 
@@ -1077,7 +1047,7 @@ static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
 
 const struct file_operations sysfs_dir_operations = {
        .read           = generic_read_dir,
-       .readdir        = sysfs_readdir,
+       .iterate        = sysfs_readdir,
        .release        = sysfs_dir_release,
        .llseek         = sysfs_dir_llseek,
 };
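
Several conversions in this series (sysfs above, UBIFS and UDF further down) replace hand-rolled emission of "." and ".." at positions 0 and 1 with the dir_emit_dots()/dir_emit_dot()/dir_emit_dotdot() helpers. Below is a rough standalone sketch of what such a helper factors out, using a pared-down context type defined only for this example, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct ctx {
        bool (*emit)(struct ctx *c, const char *name);
        long pos;
};

/* Roughly what a dir_emit_dots()-style helper does: entries 0 and 1 are
 * always "." and "..", and the cursor advances only when the consumer
 * accepted the entry. Returns false once the consumer is full. */
static bool emit_dots(struct ctx *c)
{
        if (c->pos == 0) {
                if (!c->emit(c, "."))
                        return false;
                c->pos = 1;
        }
        if (c->pos == 1) {
                if (!c->emit(c, ".."))
                        return false;
                c->pos = 2;
        }
        return true;
}

static bool print_entry(struct ctx *c, const char *name)
{
        printf("%ld: %s\n", c->pos, name);
        return true;
}

int main(void)
{
        struct ctx c = { .emit = print_entry, .pos = 0 };

        if (emit_dots(&c))
                printf("real entries would start at pos %ld\n", c.pos);
        return 0;
}
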
index 3799e8dac3ebf26e288a80086bc97adf136ff06b..d42291d08215abd016d4e3b5aac03fa753ce70b6 100644 (file)
 #include <linux/swap.h>
 #include "sysv.h"
 
-static int sysv_readdir(struct file *, void *, filldir_t);
+static int sysv_readdir(struct file *, struct dir_context *);
 
 const struct file_operations sysv_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .readdir        = sysv_readdir,
+       .iterate        = sysv_readdir,
        .fsync          = generic_file_fsync,
 };
 
@@ -65,18 +65,21 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n)
        return page;
 }
 
-static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir)
+static int sysv_readdir(struct file *file, struct dir_context *ctx)
 {
-       unsigned long pos = filp->f_pos;
-       struct inode *inode = file_inode(filp);
+       unsigned long pos = ctx->pos;
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
-       unsigned offset = pos & ~PAGE_CACHE_MASK;
-       unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = dir_pages(inode);
+       unsigned offset;
+       unsigned long n;
 
-       pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1);
+       ctx->pos = pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1);
        if (pos >= inode->i_size)
-               goto done;
+               return 0;
+
+       offset = pos & ~PAGE_CACHE_MASK;
+       n = pos >> PAGE_CACHE_SHIFT;
 
        for ( ; n < npages; n++, offset = 0) {
                char *kaddr, *limit;
@@ -88,29 +91,21 @@ static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir)
                kaddr = (char *)page_address(page);
                de = (struct sysv_dir_entry *)(kaddr+offset);
                limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE;
-               for ( ;(char*)de <= limit; de++) {
+               for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
                        char *name = de->name;
-                       int over;
 
                        if (!de->inode)
                                continue;
 
-                       offset = (char *)de - kaddr;
-
-                       over = filldir(dirent, name, strnlen(name,SYSV_NAMELEN),
-                                       ((loff_t)n<<PAGE_CACHE_SHIFT) | offset,
+                       if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN),
                                        fs16_to_cpu(SYSV_SB(sb), de->inode),
-                                       DT_UNKNOWN);
-                       if (over) {
+                                       DT_UNKNOWN)) {
                                dir_put_page(page);
-                               goto done;
+                               return 0;
                        }
                }
                dir_put_page(page);
        }
-
-done:
-       filp->f_pos = ((loff_t)n << PAGE_CACHE_SHIFT) | offset;
        return 0;
 }
 
index de08c92f2e234c9db1b9773940d5db5ba263535f..6b4947f75af7a00599c129b2d4e5a97a27222f4a 100644 (file)
@@ -346,38 +346,46 @@ static unsigned int vfs_dent_type(uint8_t type)
  * This means that UBIFS cannot support NFS which requires full
  * 'seekdir()'/'telldir()' support.
  */
-static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 {
-       int err, over = 0;
+       int err;
        struct qstr nm;
        union ubifs_key key;
        struct ubifs_dent_node *dent;
        struct inode *dir = file_inode(file);
        struct ubifs_info *c = dir->i_sb->s_fs_info;
 
-       dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
+       dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, ctx->pos);
 
-       if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
+       if (ctx->pos > UBIFS_S_KEY_HASH_MASK || ctx->pos == 2)
                /*
                 * The directory was seek'ed to a senseless position or there
                 * are no more entries.
                 */
                return 0;
 
-       /* File positions 0 and 1 correspond to "." and ".." */
-       if (file->f_pos == 0) {
-               ubifs_assert(!file->private_data);
-               over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
-               if (over)
-                       return 0;
-               file->f_pos = 1;
+       if (file->f_version == 0) {
+               /*
+                * The file was seek'ed, which means that @file->private_data
+                * is now invalid. This may also be just the first
+                * 'ubifs_readdir()' invocation, in which case
+                * @file->private_data is NULL, and the below code is
+                * basically a no-op.
+                */
+               kfree(file->private_data);
+               file->private_data = NULL;
        }
 
-       if (file->f_pos == 1) {
+       /*
+        * 'generic_file_llseek()' unconditionally sets @file->f_version to
+        * zero, and we use this for detecting whether the file was seek'ed.
+        */
+       file->f_version = 1;
+
+       /* File positions 0 and 1 correspond to "." and ".." */
+       if (ctx->pos < 2) {
                ubifs_assert(!file->private_data);
-               over = filldir(dirent, "..", 2, 1,
-                              parent_ino(file->f_path.dentry), DT_DIR);
-               if (over)
+               if (!dir_emit_dots(file, ctx))
                        return 0;
 
                /* Find the first entry in TNC and save it */
@@ -389,7 +397,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
                        goto out;
                }
 
-               file->f_pos = key_hash_flash(c, &dent->key);
+               ctx->pos = key_hash_flash(c, &dent->key);
                file->private_data = dent;
        }
 
@@ -397,17 +405,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
        if (!dent) {
                /*
                 * The directory was seek'ed to and is now readdir'ed.
-                * Find the entry corresponding to @file->f_pos or the
-                * closest one.
+                * Find the entry corresponding to @ctx->pos or the closest one.
                 */
-               dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
+               dent_key_init_hash(c, &key, dir->i_ino, ctx->pos);
                nm.name = NULL;
                dent = ubifs_tnc_next_ent(c, &key, &nm);
                if (IS_ERR(dent)) {
                        err = PTR_ERR(dent);
                        goto out;
                }
-               file->f_pos = key_hash_flash(c, &dent->key);
+               ctx->pos = key_hash_flash(c, &dent->key);
                file->private_data = dent;
        }
 
@@ -419,10 +426,9 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
                             ubifs_inode(dir)->creat_sqnum);
 
                nm.len = le16_to_cpu(dent->nlen);
-               over = filldir(dirent, dent->name, nm.len, file->f_pos,
+               if (!dir_emit(ctx, dent->name, nm.len,
                               le64_to_cpu(dent->inum),
-                              vfs_dent_type(dent->type));
-               if (over)
+                              vfs_dent_type(dent->type)))
                        return 0;
 
                /* Switch to the next entry */
@@ -435,7 +441,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
                }
 
                kfree(file->private_data);
-               file->f_pos = key_hash_flash(c, &dent->key);
+               ctx->pos = key_hash_flash(c, &dent->key);
                file->private_data = dent;
                cond_resched();
        }
@@ -448,18 +454,11 @@ out:
 
        kfree(file->private_data);
        file->private_data = NULL;
-       file->f_pos = 2;
+       /* 2 is a special value indicating that there are no more direntries */
+       ctx->pos = 2;
        return 0;
 }
 
-/* If a directory is seeked, we have to free saved readdir() state */
-static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence)
-{
-       kfree(file->private_data);
-       file->private_data = NULL;
-       return generic_file_llseek(file, offset, whence);
-}
-
 /* Free saved readdir() state when the directory is closed */
 static int ubifs_dir_release(struct inode *dir, struct file *file)
 {
@@ -1177,10 +1176,10 @@ const struct inode_operations ubifs_dir_inode_operations = {
 };
 
 const struct file_operations ubifs_dir_operations = {
-       .llseek         = ubifs_dir_llseek,
+       .llseek         = generic_file_llseek,
        .release        = ubifs_dir_release,
        .read           = generic_read_dir,
-       .readdir        = ubifs_readdir,
+       .iterate        = ubifs_readdir,
        .fsync          = ubifs_fsync,
        .unlocked_ioctl = ubifs_ioctl,
 #ifdef CONFIG_COMPAT
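
UBIFS keeps its readdir cursor (the last emitted dentry) in file->private_data, and before this change it needed a private llseek just to throw that cache away on seek. The converted code drops ubifs_dir_llseek() and instead relies on generic_file_llseek() zeroing file->f_version, treating a zero version as "the position moved, the cached entry is stale". A toy, purely illustrative model of that handshake follows; nothing in it is kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_file {
        unsigned long f_version;        /* 0 means a seek happened since last readdir */
        char *private_data;             /* cached iteration state */
};

static void toy_llseek(struct toy_file *f)
{
        f->f_version = 0;               /* mirrors generic_file_llseek() clearing f_version */
}

static void toy_readdir(struct toy_file *f)
{
        if (f->f_version == 0) {        /* seeked (or first call): drop stale state */
                free(f->private_data);
                f->private_data = NULL;
        }
        f->f_version = 1;               /* valid until the next seek */
        if (!f->private_data)
                f->private_data = strdup("fresh cursor");
        printf("readdir using: %s\n",
               f->private_data ? f->private_data : "(alloc failed)");
}

int main(void)
{
        struct toy_file f = { 0, NULL };

        toy_readdir(&f);                /* first call: builds state */
        toy_readdir(&f);                /* no seek in between: reuses it */
        toy_llseek(&f);
        toy_readdir(&f);                /* after seek: state rebuilt */
        free(f.private_data);
        return 0;
}
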
index 14374530784c683f36a15e52b64d792578a8ec07..123c79b7261ef8092e57477bd1141d365a0a2a3f 100644 (file)
@@ -1277,13 +1277,14 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
        return err;
 }
 
-static void ubifs_invalidatepage(struct page *page, unsigned long offset)
+static void ubifs_invalidatepage(struct page *page, unsigned int offset,
+                                unsigned int length)
 {
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
 
        ubifs_assert(PagePrivate(page));
-       if (offset)
+       if (offset || length < PAGE_CACHE_SIZE)
                /* Partial page remains dirty */
                return;
 
index b3e93f5e17c367fcfd818a4e7a0a455929baf847..a012c51caffd2a195b6015b9594d0f1862dba324 100644 (file)
 #include "udf_i.h"
 #include "udf_sb.h"
 
-static int do_udf_readdir(struct inode *dir, struct file *filp,
-                         filldir_t filldir, void *dirent)
+
+static int udf_readdir(struct file *file, struct dir_context *ctx)
 {
+       struct inode *dir = file_inode(file);
+       struct udf_inode_info *iinfo = UDF_I(dir);
        struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL};
        struct fileIdentDesc *fi = NULL;
        struct fileIdentDesc cfi;
        int block, iblock;
-       loff_t nf_pos = (filp->f_pos - 1) << 2;
+       loff_t nf_pos;
        int flen;
        unsigned char *fname = NULL;
        unsigned char *nameptr;
@@ -54,10 +56,14 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
        uint32_t elen;
        sector_t offset;
        int i, num, ret = 0;
-       unsigned int dt_type;
        struct extent_position epos = { NULL, 0, {0, 0} };
-       struct udf_inode_info *iinfo;
 
+       if (ctx->pos == 0) {
+               if (!dir_emit_dot(file, ctx))
+                       return 0;
+               ctx->pos = 1;
+       }
+       nf_pos = (ctx->pos - 1) << 2;
        if (nf_pos >= size)
                goto out;
 
@@ -71,7 +77,6 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
                nf_pos = udf_ext0_offset(dir);
 
        fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
-       iinfo = UDF_I(dir);
        if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
                if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
                    &epos, &eloc, &elen, &offset)
@@ -116,7 +121,9 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
        }
 
        while (nf_pos < size) {
-               filp->f_pos = (nf_pos >> 2) + 1;
+               struct kernel_lb_addr tloc;
+
+               ctx->pos = (nf_pos >> 2) + 1;
 
                fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
                                        &elen, &offset);
@@ -155,24 +162,22 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
                }
 
                if (cfi.fileCharacteristics & FID_FILE_CHAR_PARENT) {
-                       iblock = parent_ino(filp->f_path.dentry);
-                       flen = 2;
-                       memcpy(fname, "..", flen);
-                       dt_type = DT_DIR;
-               } else {
-                       struct kernel_lb_addr tloc = lelb_to_cpu(cfi.icb.extLocation);
-
-                       iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
-                       flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
-                       dt_type = DT_UNKNOWN;
+                       if (!dir_emit_dotdot(file, ctx))
+                               goto out;
+                       continue;
                }
 
-               if (flen && filldir(dirent, fname, flen, filp->f_pos,
-                                   iblock, dt_type) < 0)
+               flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+               if (!flen)
+                       continue;
+
+               tloc = lelb_to_cpu(cfi.icb.extLocation);
+               iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
+               if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
                        goto out;
        } /* end while */
 
-       filp->f_pos = (nf_pos >> 2) + 1;
+       ctx->pos = (nf_pos >> 2) + 1;
 
 out:
        if (fibh.sbh != fibh.ebh)
@@ -184,27 +189,11 @@ out:
        return ret;
 }
 
-static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
-{
-       struct inode *dir = file_inode(filp);
-       int result;
-
-       if (filp->f_pos == 0) {
-               if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
-                       return 0;
-               }
-               filp->f_pos++;
-       }
-
-       result = do_udf_readdir(dir, filp, filldir, dirent);
-       return result;
-}
-
 /* readdir and lookup functions */
 const struct file_operations udf_dir_operations = {
        .llseek                 = generic_file_llseek,
        .read                   = generic_read_dir,
-       .readdir                = udf_readdir,
+       .iterate                = udf_readdir,
        .unlocked_ioctl         = udf_ioctl,
        .fsync                  = generic_file_fsync,
 };
index 3a75ca09c506591910e45a5bad970e80382b970c..0ecc2cebed8ffec60bc9ccdb408519253d7fc3a6 100644 (file)
@@ -430,16 +430,16 @@ ufs_validate_entry(struct super_block *sb, char *base,
  * This is blatantly stolen from ext2fs
  */
 static int
-ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ufs_readdir(struct file *file, struct dir_context *ctx)
 {
-       loff_t pos = filp->f_pos;
-       struct inode *inode = file_inode(filp);
+       loff_t pos = ctx->pos;
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = ufs_dir_pages(inode);
        unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
-       int need_revalidate = filp->f_version != inode->i_version;
+       int need_revalidate = file->f_version != inode->i_version;
        unsigned flags = UFS_SB(sb)->s_flags;
 
        UFSD("BEGIN\n");
@@ -457,16 +457,16 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        ufs_error(sb, __func__,
                                  "bad page in #%lu",
                                  inode->i_ino);
-                       filp->f_pos += PAGE_CACHE_SIZE - offset;
+                       ctx->pos += PAGE_CACHE_SIZE - offset;
                        return -EIO;
                }
                kaddr = page_address(page);
                if (unlikely(need_revalidate)) {
                        if (offset) {
                                offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
-                               filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
+                               ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
                        }
-                       filp->f_version = inode->i_version;
+                       file->f_version = inode->i_version;
                        need_revalidate = 0;
                }
                de = (struct ufs_dir_entry *)(kaddr+offset);
@@ -479,11 +479,8 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                return -EIO;
                        }
                        if (de->d_ino) {
-                               int over;
                                unsigned char d_type = DT_UNKNOWN;
 
-                               offset = (char *)de - kaddr;
-
                                UFSD("filldir(%s,%u)\n", de->d_name,
                                      fs32_to_cpu(sb, de->d_ino));
                                UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));
@@ -491,16 +488,15 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
                                        d_type = de->d_u.d_44.d_type;
 
-                               over = filldir(dirent, de->d_name,
+                               if (!dir_emit(ctx, de->d_name,
                                               ufs_get_de_namlen(sb, de),
-                                               (n<<PAGE_CACHE_SHIFT) | offset,
-                                              fs32_to_cpu(sb, de->d_ino), d_type);
-                               if (over) {
+                                              fs32_to_cpu(sb, de->d_ino),
+                                              d_type)) {
                                        ufs_put_page(page);
                                        return 0;
                                }
                        }
-                       filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
+                       ctx->pos += fs16_to_cpu(sb, de->d_reclen);
                }
                ufs_put_page(page);
        }
@@ -660,7 +656,7 @@ not_empty:
 
 const struct file_operations ufs_dir_operations = {
        .read           = generic_read_dir,
-       .readdir        = ufs_readdir,
+       .iterate        = ufs_readdir,
        .fsync          = generic_file_fsync,
        .llseek         = generic_file_llseek,
 };
index 41a695048be7b09b87baf4517fd8124b6d7a8ffb..596ec71da00e8d4f005ab264142e927d03ac667d 100644 (file)
@@ -843,10 +843,12 @@ xfs_cluster_write(
 STATIC void
 xfs_vm_invalidatepage(
        struct page             *page,
-       unsigned long           offset)
+       unsigned int            offset,
+       unsigned int            length)
 {
-       trace_xfs_invalidatepage(page->mapping->host, page, offset);
-       block_invalidatepage(page, offset);
+       trace_xfs_invalidatepage(page->mapping->host, page, offset,
+                                length);
+       block_invalidatepage(page, offset, length);
 }
 
 /*
@@ -910,7 +912,7 @@ next_buffer:
 
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_invalidate:
-       xfs_vm_invalidatepage(page, 0);
+       xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
        return;
 }
 
@@ -940,7 +942,7 @@ xfs_vm_writepage(
        int                     count = 0;
        int                     nonblocking = 0;
 
-       trace_xfs_writepage(inode, page, 0);
+       trace_xfs_writepage(inode, page, 0, 0);
 
        ASSERT(page_has_buffers(page));
 
@@ -1171,7 +1173,7 @@ xfs_vm_releasepage(
 {
        int                     delalloc, unwritten;
 
-       trace_xfs_releasepage(page->mapping->host, page, 0);
+       trace_xfs_releasepage(page->mapping->host, page, 0, 0);
 
        xfs_count_page_state(page, &delalloc, &unwritten);
 
index b26a50f9921db60e746d43946b644415ba24ca6a..8f023dee404da0da9c2092ba15d5ad904588d5e4 100644 (file)
@@ -368,10 +368,8 @@ xfs_dir_removename(
 int
 xfs_readdir(
        xfs_inode_t     *dp,
-       void            *dirent,
-       size_t          bufsize,
-       xfs_off_t       *offset,
-       filldir_t       filldir)
+       struct dir_context *ctx,
+       size_t          bufsize)
 {
        int             rval;           /* return value */
        int             v;              /* type-checking value */
@@ -385,14 +383,13 @@ xfs_readdir(
        XFS_STATS_INC(xs_dir_getdents);
 
        if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
-               rval = xfs_dir2_sf_getdents(dp, dirent, offset, filldir);
+               rval = xfs_dir2_sf_getdents(dp, ctx);
        else if ((rval = xfs_dir2_isblock(NULL, dp, &v)))
                ;
        else if (v)
-               rval = xfs_dir2_block_getdents(dp, dirent, offset, filldir);
+               rval = xfs_dir2_block_getdents(dp, ctx);
        else
-               rval = xfs_dir2_leaf_getdents(dp, dirent, bufsize, offset,
-                                             filldir);
+               rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize);
        return rval;
 }
 
index e59f5fc816fe9a98a99ee11f712987fcd6c5b39a..09aea0247d9655c1cd7e9a7c77d1547968222813 100644 (file)
@@ -569,9 +569,7 @@ xfs_dir2_block_addname(
 int                                            /* error */
 xfs_dir2_block_getdents(
        xfs_inode_t             *dp,            /* incore inode */
-       void                    *dirent,
-       xfs_off_t               *offset,
-       filldir_t               filldir)
+       struct dir_context      *ctx)
 {
        xfs_dir2_data_hdr_t     *hdr;           /* block header */
        struct xfs_buf          *bp;            /* buffer for block */
@@ -589,7 +587,7 @@ xfs_dir2_block_getdents(
        /*
         * If the block number in the offset is out of range, we're done.
         */
-       if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk)
+       if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk)
                return 0;
 
        error = xfs_dir3_block_read(NULL, dp, &bp);
@@ -600,7 +598,7 @@ xfs_dir2_block_getdents(
         * Extract the byte offset we start at from the seek pointer.
         * We'll skip entries before this.
         */
-       wantoff = xfs_dir2_dataptr_to_off(mp, *offset);
+       wantoff = xfs_dir2_dataptr_to_off(mp, ctx->pos);
        hdr = bp->b_addr;
        xfs_dir3_data_check(dp, bp);
        /*
@@ -639,13 +637,12 @@ xfs_dir2_block_getdents(
                cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
                                            (char *)dep - (char *)hdr);
 
+               ctx->pos = cook & 0x7fffffff;
                /*
                 * If it didn't fit, set the final offset to here & return.
                 */
-               if (filldir(dirent, (char *)dep->name, dep->namelen,
-                           cook & 0x7fffffff, be64_to_cpu(dep->inumber),
-                           DT_UNKNOWN)) {
-                       *offset = cook & 0x7fffffff;
+               if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
+                           be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
                        xfs_trans_brelse(NULL, bp);
                        return 0;
                }
@@ -655,7 +652,7 @@ xfs_dir2_block_getdents(
         * Reached the end of the block.
         * Set the offset to a non-existent block 1 and return.
         */
-       *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+       ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
                        0x7fffffff;
        xfs_trans_brelse(NULL, bp);
        return 0;
index da71a1819d780cd35bd9817fd89deabd5feb3eb7..e0cc1243a8aaea11765336a8f8908b25b6b1c963 100644 (file)
@@ -1300,10 +1300,8 @@ out:
 int                                            /* error */
 xfs_dir2_leaf_getdents(
        xfs_inode_t             *dp,            /* incore directory inode */
-       void                    *dirent,
-       size_t                  bufsize,
-       xfs_off_t               *offset,
-       filldir_t               filldir)
+       struct dir_context      *ctx,
+       size_t                  bufsize)
 {
        struct xfs_buf          *bp = NULL;     /* data block buffer */
        xfs_dir2_data_hdr_t     *hdr;           /* data block header */
@@ -1322,7 +1320,7 @@ xfs_dir2_leaf_getdents(
         * If the offset is at or past the largest allowed value,
         * give up right away.
         */
-       if (*offset >= XFS_DIR2_MAX_DATAPTR)
+       if (ctx->pos >= XFS_DIR2_MAX_DATAPTR)
                return 0;
 
        mp = dp->i_mount;
@@ -1343,7 +1341,7 @@ xfs_dir2_leaf_getdents(
         * Inside the loop we keep the main offset value as a byte offset
         * in the directory file.
         */
-       curoff = xfs_dir2_dataptr_to_byte(mp, *offset);
+       curoff = xfs_dir2_dataptr_to_byte(mp, ctx->pos);
 
        /*
         * Force this conversion through db so we truncate the offset
@@ -1444,8 +1442,8 @@ xfs_dir2_leaf_getdents(
                dep = (xfs_dir2_data_entry_t *)ptr;
                length = xfs_dir2_data_entsize(dep->namelen);
 
-               if (filldir(dirent, (char *)dep->name, dep->namelen,
-                           xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
+               ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
+               if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
                            be64_to_cpu(dep->inumber), DT_UNKNOWN))
                        break;
 
@@ -1462,9 +1460,9 @@ xfs_dir2_leaf_getdents(
         * All done.  Set output offset value to current offset.
         */
        if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR))
-               *offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
+               ctx->pos = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
        else
-               *offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
+               ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
        kmem_free(map_info);
        if (bp)
                xfs_trans_brelse(NULL, bp);
index 7cf573c88aad5394f977b5b794026159f2248488..0511cda4a712a480682b946829c5587cf37c4070 100644 (file)
@@ -33,8 +33,8 @@ extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
 extern const struct xfs_buf_ops xfs_dir3_block_buf_ops;
 
 extern int xfs_dir2_block_addname(struct xfs_da_args *args);
-extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent,
-               xfs_off_t *offset, filldir_t filldir);
+extern int xfs_dir2_block_getdents(struct xfs_inode *dp,
+               struct dir_context *ctx);
 extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_block_removename(struct xfs_da_args *args);
 extern int xfs_dir2_block_replace(struct xfs_da_args *args);
@@ -91,8 +91,8 @@ extern void xfs_dir3_leaf_compact(struct xfs_da_args *args,
 extern void xfs_dir3_leaf_compact_x1(struct xfs_dir3_icleaf_hdr *leafhdr,
                struct xfs_dir2_leaf_entry *ents, int *indexp,
                int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
-extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent,
-               size_t bufsize, xfs_off_t *offset, filldir_t filldir);
+extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, struct dir_context *ctx,
+               size_t bufsize);
 extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno,
                struct xfs_buf **bpp, __uint16_t magic);
 extern void xfs_dir3_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp,
@@ -153,8 +153,7 @@ extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
                int size, xfs_dir2_sf_hdr_t *sfhp);
 extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
 extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
-extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent,
-               xfs_off_t *offset, filldir_t filldir);
+extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, struct dir_context *ctx);
 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
index 6157424dbf8f6eb43a1a84f317a569797daa4323..97676a347da166e5d843db8ab2052666390e018d 100644 (file)
@@ -768,9 +768,7 @@ xfs_dir2_sf_create(
 int                                            /* error */
 xfs_dir2_sf_getdents(
        xfs_inode_t             *dp,            /* incore directory inode */
-       void                    *dirent,
-       xfs_off_t               *offset,
-       filldir_t               filldir)
+       struct dir_context      *ctx)
 {
        int                     i;              /* shortform entry number */
        xfs_mount_t             *mp;            /* filesystem mount point */
@@ -802,7 +800,7 @@ xfs_dir2_sf_getdents(
        /*
         * If the block number in the offset is out of range, we're done.
         */
-       if (xfs_dir2_dataptr_to_db(mp, *offset) > mp->m_dirdatablk)
+       if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk)
                return 0;
 
        /*
@@ -819,22 +817,20 @@ xfs_dir2_sf_getdents(
        /*
         * Put . entry unless we're starting past it.
         */
-       if (*offset <= dot_offset) {
-               if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, dp->i_ino, DT_DIR)) {
-                       *offset = dot_offset & 0x7fffffff;
+       if (ctx->pos <= dot_offset) {
+               ctx->pos = dot_offset & 0x7fffffff;
+               if (!dir_emit(ctx, ".", 1, dp->i_ino, DT_DIR))
                        return 0;
-               }
        }
 
        /*
         * Put .. entry unless we're starting past it.
         */
-       if (*offset <= dotdot_offset) {
+       if (ctx->pos <= dotdot_offset) {
                ino = xfs_dir2_sf_get_parent_ino(sfp);
-               if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
-                       *offset = dotdot_offset & 0x7fffffff;
+               ctx->pos = dotdot_offset & 0x7fffffff;
+               if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
                        return 0;
-               }
        }
 
        /*
@@ -845,21 +841,20 @@ xfs_dir2_sf_getdents(
                off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
                                xfs_dir2_sf_get_offset(sfep));
 
-               if (*offset > off) {
+               if (ctx->pos > off) {
                        sfep = xfs_dir2_sf_nextentry(sfp, sfep);
                        continue;
                }
 
                ino = xfs_dir2_sfe_get_ino(sfp, sfep);
-               if (filldir(dirent, (char *)sfep->name, sfep->namelen,
-                           off & 0x7fffffff, ino, DT_UNKNOWN)) {
-                       *offset = off & 0x7fffffff;
+               ctx->pos = off & 0x7fffffff;
+               if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen,
+                           ino, DT_UNKNOWN))
                        return 0;
-               }
                sfep = xfs_dir2_sf_nextentry(sfp, sfep);
        }
 
-       *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+       ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
                        0x7fffffff;
        return 0;
 }
index a5f2042aec8b27e730f0cbdedaef9eb50c9422f0..0ad2b95fca12fbd215b9eb63f84b2bd7c21b6609 100644 (file)
@@ -906,11 +906,10 @@ xfs_file_release(
 
 STATIC int
 xfs_file_readdir(
-       struct file     *filp,
-       void            *dirent,
-       filldir_t       filldir)
+       struct file     *file,
+       struct dir_context *ctx)
 {
-       struct inode    *inode = file_inode(filp);
+       struct inode    *inode = file_inode(file);
        xfs_inode_t     *ip = XFS_I(inode);
        int             error;
        size_t          bufsize;
@@ -929,8 +928,7 @@ xfs_file_readdir(
         */
        bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
 
-       error = xfs_readdir(ip, dirent, bufsize,
-                               (xfs_off_t *)&filp->f_pos, filldir);
+       error = xfs_readdir(ip, ctx, bufsize);
        if (error)
                return -error;
        return 0;
@@ -1432,7 +1430,7 @@ const struct file_operations xfs_file_operations = {
 const struct file_operations xfs_dir_file_operations = {
        .open           = xfs_dir_open,
        .read           = generic_read_dir,
-       .readdir        = xfs_file_readdir,
+       .iterate        = xfs_file_readdir,
        .llseek         = generic_file_llseek,
        .unlocked_ioctl = xfs_file_ioctl,
 #ifdef CONFIG_COMPAT
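
With the switch from .readdir to .iterate, f_pos handling moves to the VFS: iterate_dir() (declared in the include/linux/fs.h hunk further down) seeds ctx->pos from file->f_pos, invokes the method, and copies the final position back under the directory's i_mutex. A rough sketch of that contract, not the exact fs/readdir.c implementation (which also performs permission and signal checks):

	/* Hedged sketch of the VFS-side caller of ->iterate(). */
	static int sketch_iterate_dir(struct file *file, struct dir_context *ctx)
	{
		struct inode *inode = file_inode(file);
		int res = -ENOENT;

		mutex_lock(&inode->i_mutex);
		if (!IS_DEADDIR(inode)) {
			ctx->pos = file->f_pos;
			res = file->f_op->iterate(file, ctx);
			file->f_pos = ctx->pos;
		}
		mutex_unlock(&inode->i_mutex);
		return res;
	}
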
index aa4db3307d369f0687cceb93e978d48397aeefcf..a04701de6bbd2cfad8fa844a38bdf66c20b744db 100644 (file)
@@ -974,14 +974,16 @@ DEFINE_RW_EVENT(xfs_file_splice_read);
 DEFINE_RW_EVENT(xfs_file_splice_write);
 
 DECLARE_EVENT_CLASS(xfs_page_class,
-       TP_PROTO(struct inode *inode, struct page *page, unsigned long off),
-       TP_ARGS(inode, page, off),
+       TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
+                unsigned int len),
+       TP_ARGS(inode, page, off, len),
        TP_STRUCT__entry(
                __field(dev_t, dev)
                __field(xfs_ino_t, ino)
                __field(pgoff_t, pgoff)
                __field(loff_t, size)
                __field(unsigned long, offset)
+               __field(unsigned int, length)
                __field(int, delalloc)
                __field(int, unwritten)
        ),
@@ -995,24 +997,27 @@ DECLARE_EVENT_CLASS(xfs_page_class,
                __entry->pgoff = page_offset(page);
                __entry->size = i_size_read(inode);
                __entry->offset = off;
+               __entry->length = len;
                __entry->delalloc = delalloc;
                __entry->unwritten = unwritten;
        ),
        TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
-                 "delalloc %d unwritten %d",
+                 "length %x delalloc %d unwritten %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->pgoff,
                  __entry->size,
                  __entry->offset,
+                 __entry->length,
                  __entry->delalloc,
                  __entry->unwritten)
 )
 
 #define DEFINE_PAGE_EVENT(name)                \
 DEFINE_EVENT(xfs_page_class, name,     \
-       TP_PROTO(struct inode *inode, struct page *page, unsigned long off),    \
-       TP_ARGS(inode, page, off))
+       TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
+                unsigned int len),     \
+       TP_ARGS(inode, page, off, len))
 DEFINE_PAGE_EVENT(xfs_writepage);
 DEFINE_PAGE_EVENT(xfs_releasepage);
 DEFINE_PAGE_EVENT(xfs_invalidatepage);
index 5163022d98089b9b6eb2fb4bdbd9df8f0861b92b..38c67c34d73f49848a91223988cf4cdf46ffb5ce 100644 (file)
@@ -31,8 +31,7 @@ int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
                struct xfs_inode *ip);
 int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
                struct xfs_name *target_name);
-int xfs_readdir(struct xfs_inode       *dp, void *dirent, size_t bufsize,
-                      xfs_off_t *offset, filldir_t filldir);
+int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, size_t bufsize);
 int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
                const char *target_path, umode_t mode, struct xfs_inode **ipp);
 int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
index 636c59f2003a315c264777eccd0e3abfbdd8045f..c13c919ab99e9803b2ebe18d36eb93ab98bf6076 100644 (file)
@@ -382,6 +382,7 @@ const char *acpi_power_state_string(int state);
 int acpi_device_get_power(struct acpi_device *device, int *state);
 int acpi_device_set_power(struct acpi_device *device, int state);
 int acpi_bus_init_power(struct acpi_device *device);
+int acpi_device_fix_up_power(struct acpi_device *device);
 int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
 
index e6168a24b9f0f202e9403f946c9aecbb8049f67b..b420939f5eb5608f2335e0b4ae4087176911022b 100644 (file)
@@ -123,7 +123,9 @@ extern int register_dock_notifier(struct notifier_block *nb);
 extern void unregister_dock_notifier(struct notifier_block *nb);
 extern int register_hotplug_dock_device(acpi_handle handle,
                                        const struct acpi_dock_ops *ops,
-                                       void *context);
+                                       void *context,
+                                       void (*init)(void *),
+                                       void (*release)(void *));
 extern void unregister_hotplug_dock_device(acpi_handle handle);
 #else
 static inline int is_dock_device(acpi_handle handle)
@@ -139,7 +141,9 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
 }
 static inline int register_hotplug_dock_device(acpi_handle handle,
                                               const struct acpi_dock_ops *ops,
-                                              void *context)
+                                              void *context,
+                                              void (*init)(void *),
+                                              void (*release)(void *))
 {
        return -ENODEV;
 }
index a59ff51b016695f54095e753cbfc2a5a6b684684..b1836987d5063f390622458683e79c4ca84b11ef 100644 (file)
@@ -692,4 +692,8 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
 
 #endif /* !__ASSEMBLY__ */
 
+#ifndef io_remap_pfn_range
+#define io_remap_pfn_range remap_pfn_range
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
index 9e52b0626b39461bf21a3b71b32b0e7b2fadaf5c..f5a3b838ddb00639aa1c6669ff60206e1f146507 100644 (file)
@@ -198,7 +198,8 @@ extern int buffer_heads_over_limit;
  * Generic address_space_operations implementations for buffer_head-backed
  * address_spaces.
  */
-void block_invalidatepage(struct page *page, unsigned long offset);
+void block_invalidatepage(struct page *page, unsigned int offset,
+                         unsigned int length);
 int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
index 73bab0f58af5c9ddf455881de6bb00124c9948dd..7571a16bd6535245c0fb68cce7e18dd5d21f1fd1 100644 (file)
@@ -75,10 +75,7 @@ extern const struct consw newport_con;       /* SGI Newport console  */
 extern const struct consw prom_con;    /* SPARC PROM console */
 
 int con_is_bound(const struct consw *csw);
-int register_con_driver(const struct consw *csw, int first, int last);
-int unregister_con_driver(const struct consw *csw);
 int do_unregister_con_driver(const struct consw *csw);
-int take_over_console(const struct consw *sw, int first, int last, int deflt);
 int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
 void give_up_console(const struct consw *sw);
 #ifdef CONFIG_HW_CONSOLE
index 365f4a61bf0408c12b3b3dbeb1743c8a2b616c35..fc09d7b0dacfc00feef340cc03c06b207feed870 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/sched.h>
 #include <linux/percpu.h>
+#include <linux/vtime.h>
 #include <asm/ptrace.h>
 
 struct context_tracking {
@@ -19,6 +20,26 @@ struct context_tracking {
        } state;
 };
 
+static inline void __guest_enter(void)
+{
+       /*
+        * This is running in ioctl context so we can avoid
+        * the call to vtime_account() with its unnecessary idle check.
+        */
+       vtime_account_system(current);
+       current->flags |= PF_VCPU;
+}
+
+static inline void __guest_exit(void)
+{
+       /*
+        * This is running in ioctl context so we can avoid
+        * the call to vtime_account() with its unnecessary idle check.
+        */
+       vtime_account_system(current);
+       current->flags &= ~PF_VCPU;
+}
+
 #ifdef CONFIG_CONTEXT_TRACKING
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
@@ -35,6 +56,9 @@ static inline bool context_tracking_active(void)
 extern void user_enter(void);
 extern void user_exit(void);
 
+extern void guest_enter(void);
+extern void guest_exit(void);
+
 static inline enum ctx_state exception_enter(void)
 {
        enum ctx_state prev_ctx;
@@ -57,6 +81,17 @@ extern void context_tracking_task_switch(struct task_struct *prev,
 static inline bool context_tracking_in_user(void) { return false; }
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
+
+static inline void guest_enter(void)
+{
+       __guest_enter();
+}
+
+static inline void guest_exit(void)
+{
+       __guest_exit();
+}
+
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline void context_tracking_task_switch(struct task_struct *prev,
index df6fab82f87e7650bafaa247523713fc3cc4be2f..383d5e39b280a1eeb2dc2d60d37446112624a493 100644 (file)
@@ -20,8 +20,8 @@
 #define F2FS_BLKSIZE                   4096    /* support only 4KB block */
 #define F2FS_MAX_EXTENSION             64      /* # of extension entries */
 
-#define NULL_ADDR              0x0U
-#define NEW_ADDR               -1U
+#define NULL_ADDR              ((block_t)0)    /* used as block_t addresses */
+#define NEW_ADDR               ((block_t)-1)   /* used as block_t addresses */
 
 #define F2FS_ROOT_INO(sbi)     (sbi->root_ino_num)
 #define F2FS_NODE_INO(sbi)     (sbi->node_ino_num)
index 43db02e9c9fa11bed1b058fba9dd9039ea9c226d..f8a5240541b77e17df8bf17fa4c10399cdb7fbd2 100644 (file)
@@ -364,7 +364,7 @@ struct address_space_operations {
 
        /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
        sector_t (*bmap)(struct address_space *, sector_t);
-       void (*invalidatepage) (struct page *, unsigned long);
+       void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, gfp_t);
        void (*freepage)(struct page *);
        ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -1506,6 +1506,11 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
  * to have different dirent layouts depending on the binary type.
  */
 typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
+struct dir_context {
+       const filldir_t actor;
+       loff_t pos;
+};
+
 struct block_device_operations;
 
 /* These macros are for out of kernel modules to test that
@@ -1521,7 +1526,7 @@ struct file_operations {
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
-       int (*readdir) (struct file *, void *, filldir_t);
+       int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
@@ -2414,8 +2419,6 @@ extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
                struct file *, loff_t *, size_t, unsigned int);
 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
                struct file *out, loff_t *, size_t len, unsigned int flags);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
-               size_t len, unsigned int flags);
 
 extern void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
@@ -2496,6 +2499,7 @@ loff_t inode_get_bytes(struct inode *inode);
 void inode_set_bytes(struct inode *inode, loff_t bytes);
 
 extern int vfs_readdir(struct file *, filldir_t, void *);
+extern int iterate_dir(struct file *, struct dir_context *);
 
 extern int vfs_stat(const char __user *, struct kstat *);
 extern int vfs_lstat(const char __user *, struct kstat *);
@@ -2526,7 +2530,7 @@ extern void iterate_supers_type(struct file_system_type *,
 extern int dcache_dir_open(struct inode *, struct file *);
 extern int dcache_dir_close(struct inode *, struct file *);
 extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
-extern int dcache_readdir(struct file *, void *, filldir_t);
+extern int dcache_readdir(struct file *, struct dir_context *);
 extern int simple_setattr(struct dentry *, struct iattr *);
 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int simple_statfs(struct dentry *, struct kstatfs *);
@@ -2690,4 +2694,41 @@ static inline void inode_has_no_xattr(struct inode *inode)
                inode->i_flags |= S_NOSEC;
 }
 
+static inline bool dir_emit(struct dir_context *ctx,
+                           const char *name, int namelen,
+                           u64 ino, unsigned type)
+{
+       return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
+}
+static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
+{
+       return ctx->actor(ctx, ".", 1, ctx->pos,
+                         file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
+}
+static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
+{
+       return ctx->actor(ctx, "..", 2, ctx->pos,
+                         parent_ino(file->f_path.dentry), DT_DIR) == 0;
+}
+static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
+{
+       if (ctx->pos == 0) {
+               if (!dir_emit_dot(file, ctx))
+                       return false;
+               ctx->pos = 1;
+       }
+       if (ctx->pos == 1) {
+               if (!dir_emit_dotdot(file, ctx))
+                       return false;
+               ctx->pos = 2;
+       }
+       return true;
+}
+static inline bool dir_relax(struct inode *inode)
+{
+       mutex_unlock(&inode->i_mutex);
+       mutex_lock(&inode->i_mutex);
+       return !IS_DEADDIR(inode);
+}
+
 #endif /* _LINUX_FS_H */
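
The dir_emit(), dir_emit_dots() and dir_relax() helpers added above are enough to write a complete ->iterate() method. A hypothetical minimal example for a filesystem with a fixed in-memory entry table ("myfs_*" names are illustrative, not part of this patch):

	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static const struct myfs_dirent {
		const char	*name;
		u64		ino;
	} myfs_entries[] = {
		{ "hello", 2 },
		{ "world", 3 },
	};

	static int myfs_iterate(struct file *file, struct dir_context *ctx)
	{
		int i;

		/* emits "." and ".." as needed and advances ctx->pos to 2 */
		if (!dir_emit_dots(file, ctx))
			return 0;

		for (i = ctx->pos - 2; i < ARRAY_SIZE(myfs_entries); i++) {
			if (!dir_emit(ctx, myfs_entries[i].name,
				      strlen(myfs_entries[i].name),
				      myfs_entries[i].ino, DT_REG))
				return 0;	/* buffer full; ctx->pos marks the resume point */
			ctx->pos++;
		}
		return 0;
	}

	static const struct file_operations myfs_dir_fops = {
		.read		= generic_read_dir,
		.iterate	= myfs_iterate,
		.llseek		= generic_file_llseek,
	};
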
index 5dfa0aa216b66f410f591c58815a17b2302e8bf8..a9ff9a36b86dc45c0f324fc2bacfc036f49fd0e3 100644 (file)
@@ -97,7 +97,8 @@ struct fscache_operation {
 #define FSCACHE_OP_WAITING     4       /* cleared when op is woken */
 #define FSCACHE_OP_EXCLUSIVE   5       /* exclusive op, other ops must wait */
 #define FSCACHE_OP_DEC_READ_CNT        6       /* decrement object->n_reads on destruction */
-#define FSCACHE_OP_KEEP_FLAGS  0x0070  /* flags to keep when repurposing an op */
+#define FSCACHE_OP_UNUSE_COOKIE        7       /* call fscache_unuse_cookie() on completion */
+#define FSCACHE_OP_KEEP_FLAGS  0x00f0  /* flags to keep when repurposing an op */
 
        enum fscache_operation_state state;
        atomic_t                usage;
@@ -150,7 +151,7 @@ struct fscache_retrieval {
        void                    *context;       /* netfs read context (pinned) */
        struct list_head        to_do;          /* list of things to be done by the backend */
        unsigned long           start_time;     /* time at which retrieval started */
-       unsigned                n_pages;        /* number of pages to be retrieved */
+       atomic_t                n_pages;        /* number of pages to be retrieved */
 };
 
 typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
@@ -194,15 +195,14 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
 static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
                                              int n_pages)
 {
-       op->n_pages -= n_pages;
-       if (op->n_pages <= 0)
+       atomic_sub(n_pages, &op->n_pages);
+       if (atomic_read(&op->n_pages) <= 0)
                fscache_op_complete(&op->op, true);
 }
 
 /**
  * fscache_put_retrieval - Drop a reference to a retrieval operation
  * @op: The retrieval operation affected
- * @n_pages: The number of pages to account for
  *
  * Drop a reference to a retrieval operation.
  */
@@ -314,6 +314,7 @@ struct fscache_cache_ops {
 struct fscache_cookie {
        atomic_t                        usage;          /* number of users of this cookie */
        atomic_t                        n_children;     /* number of children of this cookie */
+       atomic_t                        n_active;       /* number of active users of netfs ptrs */
        spinlock_t                      lock;
        spinlock_t                      stores_lock;    /* lock on page store tree */
        struct hlist_head               backing_objects; /* object(s) backing this file/index */
@@ -326,13 +327,11 @@ struct fscache_cookie {
 
        unsigned long                   flags;
 #define FSCACHE_COOKIE_LOOKING_UP      0       /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_CREATING                1       /* T if non-index object being created still */
-#define FSCACHE_COOKIE_NO_DATA_YET     2       /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_PENDING_FILL    3       /* T if pending initial fill on object */
-#define FSCACHE_COOKIE_FILLING         4       /* T if filling object incrementally */
-#define FSCACHE_COOKIE_UNAVAILABLE     5       /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_WAITING_ON_READS        6       /* T if cookie is waiting on reads */
-#define FSCACHE_COOKIE_INVALIDATING    7       /* T if cookie is being invalidated */
+#define FSCACHE_COOKIE_NO_DATA_YET     1       /* T if new object with no cached data yet */
+#define FSCACHE_COOKIE_UNAVAILABLE     2       /* T if cookie is unavailable (error, etc) */
+#define FSCACHE_COOKIE_INVALIDATING    3       /* T if cookie is being invalidated */
+#define FSCACHE_COOKIE_RELINQUISHED    4       /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_RETIRED         5       /* T if cookie was retired */
 };
 
 extern struct fscache_cookie fscache_fsdef_index;
@@ -341,45 +340,40 @@ extern struct fscache_cookie fscache_fsdef_index;
  * Event list for fscache_object::{event_mask,events}
  */
 enum {
-       FSCACHE_OBJECT_EV_REQUEUE,      /* T if object should be requeued */
+       FSCACHE_OBJECT_EV_NEW_CHILD,    /* T if object has a new child */
+       FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */
        FSCACHE_OBJECT_EV_UPDATE,       /* T if object should be updated */
        FSCACHE_OBJECT_EV_INVALIDATE,   /* T if cache requested object invalidation */
        FSCACHE_OBJECT_EV_CLEARED,      /* T if accessors all gone */
        FSCACHE_OBJECT_EV_ERROR,        /* T if fatal error occurred during processing */
-       FSCACHE_OBJECT_EV_RELEASE,      /* T if netfs requested object release */
-       FSCACHE_OBJECT_EV_RETIRE,       /* T if netfs requested object retirement */
-       FSCACHE_OBJECT_EV_WITHDRAW,     /* T if cache requested object withdrawal */
+       FSCACHE_OBJECT_EV_KILL,         /* T if netfs relinquished or cache withdrew object */
        NR_FSCACHE_OBJECT_EVENTS
 };
 
 #define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
 
+/*
+ * States for object state machine.
+ */
+struct fscache_transition {
+       unsigned long events;
+       const struct fscache_state *transit_to;
+};
+
+struct fscache_state {
+       char name[24];
+       char short_name[8];
+       const struct fscache_state *(*work)(struct fscache_object *object,
+                                           int event);
+       const struct fscache_transition transitions[];
+};
+
 /*
  * on-disk cache file or index handle
  */
 struct fscache_object {
-       enum fscache_object_state {
-               FSCACHE_OBJECT_INIT,            /* object in initial unbound state */
-               FSCACHE_OBJECT_LOOKING_UP,      /* looking up object */
-               FSCACHE_OBJECT_CREATING,        /* creating object */
-
-               /* active states */
-               FSCACHE_OBJECT_AVAILABLE,       /* cleaning up object after creation */
-               FSCACHE_OBJECT_ACTIVE,          /* object is usable */
-               FSCACHE_OBJECT_INVALIDATING,    /* object is invalidating */
-               FSCACHE_OBJECT_UPDATING,        /* object is updating */
-
-               /* terminal states */
-               FSCACHE_OBJECT_DYING,           /* object waiting for accessors to finish */
-               FSCACHE_OBJECT_LC_DYING,        /* object cleaning up after lookup/create */
-               FSCACHE_OBJECT_ABORT_INIT,      /* abort the init state */
-               FSCACHE_OBJECT_RELEASING,       /* releasing object */
-               FSCACHE_OBJECT_RECYCLING,       /* retiring object */
-               FSCACHE_OBJECT_WITHDRAWING,     /* withdrawing object */
-               FSCACHE_OBJECT_DEAD,            /* object is now dead */
-               FSCACHE_OBJECT__NSTATES
-       } state;
-
+       const struct fscache_state *state;      /* Object state machine state */
+       const struct fscache_transition *oob_table; /* OOB state transition table */
        int                     debug_id;       /* debugging ID */
        int                     n_children;     /* number of child objects */
        int                     n_ops;          /* number of extant ops on object */
@@ -390,6 +384,7 @@ struct fscache_object {
        spinlock_t              lock;           /* state and operations lock */
 
        unsigned long           lookup_jif;     /* time at which lookup started */
+       unsigned long           oob_event_mask; /* OOB events this object is interested in */
        unsigned long           event_mask;     /* events this object is interested in */
        unsigned long           events;         /* events to be processed by this object
                                                 * (order is important - using fls) */
@@ -398,6 +393,9 @@ struct fscache_object {
 #define FSCACHE_OBJECT_LOCK            0       /* T if object is busy being processed */
 #define FSCACHE_OBJECT_PENDING_WRITE   1       /* T if object has pending write */
 #define FSCACHE_OBJECT_WAITING         2       /* T if object is waiting on its parent */
+#define FSCACHE_OBJECT_IS_LIVE         3       /* T if object is not withdrawn or relinquished */
+#define FSCACHE_OBJECT_IS_LOOKED_UP    4       /* T if object has been looked up */
+#define FSCACHE_OBJECT_IS_AVAILABLE    5       /* T if object has become active */
 
        struct list_head        cache_link;     /* link in cache->object_list */
        struct hlist_node       cookie_link;    /* link in cookie->backing_objects */
@@ -415,62 +413,40 @@ struct fscache_object {
        loff_t                  store_limit_l;  /* current storage limit */
 };
 
-extern const char *fscache_object_states[];
+extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
+                               struct fscache_cache *);
+extern void fscache_object_destroy(struct fscache_object *);
 
-#define fscache_object_is_active(obj)                        \
-       (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) &&  \
-        (obj)->state >= FSCACHE_OBJECT_AVAILABLE &&          \
-        (obj)->state < FSCACHE_OBJECT_DYING)
+extern void fscache_object_lookup_negative(struct fscache_object *object);
+extern void fscache_obtained_object(struct fscache_object *object);
 
-#define fscache_object_is_dead(obj)                            \
-       (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) &&     \
-        (obj)->state >= FSCACHE_OBJECT_DYING)
+static inline bool fscache_object_is_live(struct fscache_object *object)
+{
+       return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+}
 
-extern void fscache_object_work_func(struct work_struct *work);
+static inline bool fscache_object_is_dying(struct fscache_object *object)
+{
+       return !fscache_object_is_live(object);
+}
 
-/**
- * fscache_object_init - Initialise a cache object description
- * @object: Object description
- *
- * Initialise a cache object description to its basic values.
- *
- * See Documentation/filesystems/caching/backend-api.txt for a complete
- * description.
- */
-static inline
-void fscache_object_init(struct fscache_object *object,
-                        struct fscache_cookie *cookie,
-                        struct fscache_cache *cache)
+static inline bool fscache_object_is_available(struct fscache_object *object)
 {
-       atomic_inc(&cache->object_count);
-
-       object->state = FSCACHE_OBJECT_INIT;
-       spin_lock_init(&object->lock);
-       INIT_LIST_HEAD(&object->cache_link);
-       INIT_HLIST_NODE(&object->cookie_link);
-       INIT_WORK(&object->work, fscache_object_work_func);
-       INIT_LIST_HEAD(&object->dependents);
-       INIT_LIST_HEAD(&object->dep_link);
-       INIT_LIST_HEAD(&object->pending_ops);
-       object->n_children = 0;
-       object->n_ops = object->n_in_progress = object->n_exclusive = 0;
-       object->events = object->event_mask = 0;
-       object->flags = 0;
-       object->store_limit = 0;
-       object->store_limit_l = 0;
-       object->cache = cache;
-       object->cookie = cookie;
-       object->parent = NULL;
+       return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
 }
 
-extern void fscache_object_lookup_negative(struct fscache_object *object);
-extern void fscache_obtained_object(struct fscache_object *object);
+static inline bool fscache_object_is_active(struct fscache_object *object)
+{
+       return fscache_object_is_available(object) &&
+               fscache_object_is_live(object) &&
+               !test_bit(FSCACHE_IOERROR, &object->cache->flags);
+}
 
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
-extern void fscache_object_destroy(struct fscache_object *object);
-#else
-#define fscache_object_destroy(object) do {} while(0)
-#endif
+static inline bool fscache_object_is_dead(struct fscache_object *object)
+{
+       return fscache_object_is_dying(object) &&
+               test_bit(FSCACHE_IOERROR, &object->cache->flags);
+}
 
 /**
  * fscache_object_destroyed - Note destruction of an object in a cache
@@ -531,6 +507,33 @@ static inline void fscache_end_io(struct fscache_retrieval *op,
        op->end_io_func(page, op->context, error);
 }
 
+/**
+ * fscache_use_cookie - Request usage of cookie attached to an object
+ * @object: Object description
+ * 
+ * Request usage of the cookie attached to an object.  false is returned if
+ * relinquishment has already reduced the cookie's active user count to 0.
+ */
+static inline bool fscache_use_cookie(struct fscache_object *object)
+{
+       struct fscache_cookie *cookie = object->cookie;
+       return atomic_inc_not_zero(&cookie->n_active) != 0;
+}
+
+/**
+ * fscache_unuse_cookie - Cease usage of cookie attached to an object
+ * @object: Object description
+ * 
+ * Cease usage of the cookie attached to an object.  When the users count
+ * reaches zero then the cookie relinquishment will be permitted to proceed.
+ */
+static inline void fscache_unuse_cookie(struct fscache_object *object)
+{
+       struct fscache_cookie *cookie = object->cookie;
+       if (atomic_dec_and_test(&cookie->n_active))
+               wake_up_atomic_t(&cookie->n_active);
+}
+
 /*
  * out-of-line cache backend functions
  */
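
The fscache_use_cookie()/fscache_unuse_cookie() pair introduced above gives cache backends a way to pin the netfs cookie across an operation. A hypothetical usage sketch (the backend function name is illustrative):

	#include <linux/fscache-cache.h>

	static void mycache_do_op(struct fscache_object *object)
	{
		/*
		 * If this fails, relinquishment has already begun and the
		 * netfs-owned data behind object->cookie must not be touched.
		 */
		if (!fscache_use_cookie(object))
			return;

		/* ... safe to use the netfs data attached to object->cookie ... */

		fscache_unuse_cookie(object);	/* may wake a waiting relinquishment */
	}
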
index 52bd03b389625c4f449052d021547712a0022cbe..637fa71de0c7541fc5011fc764fe23c391f6debc 100644 (file)
@@ -44,7 +44,7 @@ struct vlan_hdr {
  *     struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
  *     @h_dest: destination ethernet address
  *     @h_source: source ethernet address
- *     @h_vlan_proto: ethernet protocol (always 0x8100)
+ *     @h_vlan_proto: ethernet protocol
  *     @h_vlan_TCI: priority and VLAN ID
  *     @h_vlan_encapsulated_proto: packet type ID or len
  */
index 7e0b622503c4ae90300c0e9281a7c02cc55ac9a4..8685d1be12c71a2d28d8be80b3bbe9ae507e1ee4 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/buffer_head.h>
 #include <linux/journal-head.h>
 #include <linux/stddef.h>
-#include <linux/bit_spinlock.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/lockdep.h>
@@ -244,6 +243,31 @@ typedef struct journal_superblock_s
 
 #include <linux/fs.h>
 #include <linux/sched.h>
+
+enum jbd_state_bits {
+       BH_JBD                  /* Has an attached ext3 journal_head */
+         = BH_PrivateStart,
+       BH_JWrite,              /* Being written to log (@@@ DEBUGGING) */
+       BH_Freed,               /* Has been freed (truncated) */
+       BH_Revoked,             /* Has been revoked from the log */
+       BH_RevokeValid,         /* Revoked flag is valid */
+       BH_JBDDirty,            /* Is dirty but journaled */
+       BH_State,               /* Pins most journal_head state */
+       BH_JournalHead,         /* Pins bh->b_private and jh->b_bh */
+       BH_Unshadow,            /* Dummy bit, for BJ_Shadow wakeup filtering */
+       BH_JBDPrivateStart,     /* First bit available for private use by FS */
+};
+
+BUFFER_FNS(JBD, jbd)
+BUFFER_FNS(JWrite, jwrite)
+BUFFER_FNS(JBDDirty, jbddirty)
+TAS_BUFFER_FNS(JBDDirty, jbddirty)
+BUFFER_FNS(Revoked, revoked)
+TAS_BUFFER_FNS(Revoked, revoked)
+BUFFER_FNS(RevokeValid, revokevalid)
+TAS_BUFFER_FNS(RevokeValid, revokevalid)
+BUFFER_FNS(Freed, freed)
+
 #include <linux/jbd_common.h>
 
 #define J_ASSERT(assert)       BUG_ON(!(assert))
@@ -840,7 +864,7 @@ extern void  journal_release_buffer (handle_t *, struct buffer_head *);
 extern int      journal_forget (handle_t *, struct buffer_head *);
 extern void     journal_sync_buffer (struct buffer_head *);
 extern void     journal_invalidatepage(journal_t *,
-                               struct page *, unsigned long);
+                               struct page *, unsigned int, unsigned int);
 extern int      journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
 extern int      journal_stop(handle_t *);
 extern int      journal_flush (journal_t *);
index 6e051f472edb6db62f7e292b71c3bc5b2c5575f5..d5b50a19463c0c1a43eaed5fb8dff41789f8f472 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h>
 #include <linux/journal-head.h>
 #include <linux/stddef.h>
-#include <linux/bit_spinlock.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
  */
 #define JBD2_EXPENSIVE_CHECKING
 extern ushort jbd2_journal_enable_debug;
+void __jbd2_debug(int level, const char *file, const char *func,
+                 unsigned int line, const char *fmt, ...);
 
-#define jbd_debug(n, f, a...)                                          \
-       do {                                                            \
-               if ((n) <= jbd2_journal_enable_debug) {                 \
-                       printk (KERN_DEBUG "(%s, %d): %s: ",            \
-                               __FILE__, __LINE__, __func__);  \
-                       printk (f, ## a);                               \
-               }                                                       \
-       } while (0)
+#define jbd_debug(n, fmt, a...) \
+       __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
 #else
-#define jbd_debug(f, a...)     /**/
+#define jbd_debug(n, fmt, a...)    /**/
 #endif
 
 extern void *jbd2_alloc(size_t size, gfp_t flags);
@@ -302,6 +297,34 @@ typedef struct journal_superblock_s
 
 #include <linux/fs.h>
 #include <linux/sched.h>
+
+enum jbd_state_bits {
+       BH_JBD                  /* Has an attached ext3 journal_head */
+         = BH_PrivateStart,
+       BH_JWrite,              /* Being written to log (@@@ DEBUGGING) */
+       BH_Freed,               /* Has been freed (truncated) */
+       BH_Revoked,             /* Has been revoked from the log */
+       BH_RevokeValid,         /* Revoked flag is valid */
+       BH_JBDDirty,            /* Is dirty but journaled */
+       BH_State,               /* Pins most journal_head state */
+       BH_JournalHead,         /* Pins bh->b_private and jh->b_bh */
+       BH_Shadow,              /* IO on shadow buffer is running */
+       BH_Verified,            /* Metadata block has been verified ok */
+       BH_JBDPrivateStart,     /* First bit available for private use by FS */
+};
+
+BUFFER_FNS(JBD, jbd)
+BUFFER_FNS(JWrite, jwrite)
+BUFFER_FNS(JBDDirty, jbddirty)
+TAS_BUFFER_FNS(JBDDirty, jbddirty)
+BUFFER_FNS(Revoked, revoked)
+TAS_BUFFER_FNS(Revoked, revoked)
+BUFFER_FNS(RevokeValid, revokevalid)
+TAS_BUFFER_FNS(RevokeValid, revokevalid)
+BUFFER_FNS(Freed, freed)
+BUFFER_FNS(Shadow, shadow)
+BUFFER_FNS(Verified, verified)
+
 #include <linux/jbd_common.h>
 
 #define J_ASSERT(assert)       BUG_ON(!(assert))
@@ -382,8 +405,15 @@ struct jbd2_revoke_table_s;
 
 struct jbd2_journal_handle
 {
-       /* Which compound transaction is this update a part of? */
-       transaction_t           *h_transaction;
+       union {
+               /* Which compound transaction is this update a part of? */
+               transaction_t   *h_transaction;
+               /* Which journal handle belongs to - used iff h_reserved set */
+               journal_t       *h_journal;
+       };
+
+       /* Handle reserved for finishing the logical operation */
+       handle_t                *h_rsv_handle;
 
        /* Number of remaining buffers we are allowed to dirty: */
        int                     h_buffer_credits;
@@ -398,6 +428,7 @@ struct jbd2_journal_handle
        /* Flags [no locking] */
        unsigned int    h_sync:         1;      /* sync-on-close */
        unsigned int    h_jdata:        1;      /* force data journaling */
+       unsigned int    h_reserved:     1;      /* handle with reserved credits */
        unsigned int    h_aborted:      1;      /* fatal error on handle */
        unsigned int    h_type:         8;      /* for handle statistics */
        unsigned int    h_line_no:      16;     /* for handle statistics */
@@ -523,12 +554,6 @@ struct transaction_s
         */
        struct journal_head     *t_checkpoint_io_list;
 
-       /*
-        * Doubly-linked circular list of temporary buffers currently undergoing
-        * IO in the log [j_list_lock]
-        */
-       struct journal_head     *t_iobuf_list;
-
        /*
         * Doubly-linked circular list of metadata buffers being shadowed by log
         * IO.  The IO buffers on the iobuf list and the shadow buffers on this
@@ -536,12 +561,6 @@ struct transaction_s
         */
        struct journal_head     *t_shadow_list;
 
-       /*
-        * Doubly-linked circular list of control buffers being written to the
-        * log. [j_list_lock]
-        */
-       struct journal_head     *t_log_list;
-
        /*
         * List of inodes whose data we've modified in data=ordered mode.
         * [j_list_lock]
@@ -671,11 +690,10 @@ jbd2_time_diff(unsigned long start, unsigned long end)
  *  waiting for checkpointing
  * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
  *  to start committing, or for a barrier lock to be released
- * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
  * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_checkpoint:  Wait queue to trigger checkpointing
  * @j_wait_commit: Wait queue to trigger commit
  * @j_wait_updates: Wait queue to wait for updates to complete
+ * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
  * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
  * @j_head: Journal head - identifies the first unused block in the journal
  * @j_tail: Journal tail - identifies the oldest still-used block in the
@@ -689,6 +707,7 @@ jbd2_time_diff(unsigned long start, unsigned long end)
  *     journal
  * @j_fs_dev: Device which holds the client fs.  For internal journal this will
  *     be equal to j_dev
+ * @j_reserved_credits: Number of buffers reserved from the running transaction
  * @j_maxlen: Total maximum capacity of the journal region on disk.
  * @j_list_lock: Protects the buffer lists and internal buffer state.
  * @j_inode: Optional inode where we store the journal.  If present, all journal
@@ -778,21 +797,18 @@ struct journal_s
         */
        wait_queue_head_t       j_wait_transaction_locked;
 
-       /* Wait queue for waiting for checkpointing to complete */
-       wait_queue_head_t       j_wait_logspace;
-
        /* Wait queue for waiting for commit to complete */
        wait_queue_head_t       j_wait_done_commit;
 
-       /* Wait queue to trigger checkpointing */
-       wait_queue_head_t       j_wait_checkpoint;
-
        /* Wait queue to trigger commit */
        wait_queue_head_t       j_wait_commit;
 
        /* Wait queue to wait for updates to complete */
        wait_queue_head_t       j_wait_updates;
 
+       /* Wait queue to wait for reserved buffer credits to drop */
+       wait_queue_head_t       j_wait_reserved;
+
        /* Semaphore for locking against concurrent checkpoints */
        struct mutex            j_checkpoint_mutex;
 
@@ -847,6 +863,9 @@ struct journal_s
        /* Total maximum capacity of the journal region on disk. */
        unsigned int            j_maxlen;
 
+       /* Number of buffers reserved from the running transaction */
+       atomic_t                j_reserved_credits;
+
        /*
         * Protects the buffer lists and internal buffer state.
         */
@@ -991,9 +1010,17 @@ extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, i
 extern void __journal_free_buffer(struct journal_head *bh);
 extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
 extern void __journal_clean_data_list(transaction_t *transaction);
+static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
+{
+       list_add_tail(&bh->b_assoc_buffers, head);
+}
+static inline void jbd2_unfile_log_bh(struct buffer_head *bh)
+{
+       list_del_init(&bh->b_assoc_buffers);
+}
 
 /* Log buffer allocation */
-extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *);
+struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
 int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
 int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
                              unsigned long *block);
@@ -1039,11 +1066,10 @@ extern void jbd2_buffer_abort_trigger(struct journal_head *jh,
                                      struct jbd2_buffer_trigger_type *triggers);
 
 /* Buffer IO */
-extern int
-jbd2_journal_write_metadata_buffer(transaction_t         *transaction,
-                             struct journal_head  *jh_in,
-                             struct journal_head **jh_out,
-                             unsigned long long   blocknr);
+extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
+                                             struct journal_head *jh_in,
+                                             struct buffer_head **bh_out,
+                                             sector_t blocknr);
 
 /* Transaction locking */
 extern void            __wait_on_journal (journal_t *);
@@ -1076,10 +1102,14 @@ static inline handle_t *journal_current_handle(void)
  */
 
 extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
-extern handle_t *jbd2__journal_start(journal_t *, int nblocks, gfp_t gfp_mask,
-                                    unsigned int type, unsigned int line_no);
+extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks,
+                                    gfp_t gfp_mask, unsigned int type,
+                                    unsigned int line_no);
 extern int      jbd2_journal_restart(handle_t *, int nblocks);
 extern int      jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
+extern int      jbd2_journal_start_reserved(handle_t *handle,
+                               unsigned int type, unsigned int line_no);
+extern void     jbd2_journal_free_reserved(handle_t *handle);
 extern int      jbd2_journal_extend (handle_t *, int nblocks);
 extern int      jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
 extern int      jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
@@ -1090,7 +1120,7 @@ extern int         jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
 extern int      jbd2_journal_forget (handle_t *, struct buffer_head *);
 extern void     journal_sync_buffer (struct buffer_head *);
 extern int      jbd2_journal_invalidatepage(journal_t *,
-                               struct page *, unsigned long);
+                               struct page *, unsigned int, unsigned int);
 extern int      jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
 extern int      jbd2_journal_stop(handle_t *);
 extern int      jbd2_journal_flush (journal_t *);
@@ -1125,6 +1155,7 @@ extern void          jbd2_journal_ack_err    (journal_t *);
 extern int        jbd2_journal_clear_err  (journal_t *);
 extern int        jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
 extern int        jbd2_journal_force_commit(journal_t *);
+extern int        jbd2_journal_force_commit_nested(journal_t *);
 extern int        jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
 extern int        jbd2_journal_begin_ordered_truncate(journal_t *journal,
                                struct jbd2_inode *inode, loff_t new_size);
@@ -1178,8 +1209,10 @@ extern int          jbd2_journal_init_revoke_caches(void);
 extern void       jbd2_journal_destroy_revoke(journal_t *);
 extern int        jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
 extern int        jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void       jbd2_journal_write_revoke_records(journal_t *,
-                                                    transaction_t *, int);
+extern void       jbd2_journal_write_revoke_records(journal_t *journal,
+                                                    transaction_t *transaction,
+                                                    struct list_head *log_bufs,
+                                                    int write_op);
 
 /* Recovery revoke support */
 extern int     jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
@@ -1195,11 +1228,9 @@ extern void      jbd2_clear_buffer_revoked_flags(journal_t *journal);
  * transitions on demand.
  */
 
-int __jbd2_log_space_left(journal_t *); /* Called with journal locked */
 int jbd2_log_start_commit(journal_t *journal, tid_t tid);
 int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
 int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
-int jbd2_journal_force_commit_nested(journal_t *journal);
 int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
 int jbd2_complete_transaction(journal_t *journal, tid_t tid);
 int jbd2_log_do_checkpoint(journal_t *journal);
@@ -1235,7 +1266,7 @@ static inline int is_journal_aborted(journal_t *journal)
 
 static inline int is_handle_aborted(handle_t *handle)
 {
-       if (handle->h_aborted)
+       if (handle->h_aborted || !handle->h_transaction)
                return 1;
        return is_journal_aborted(handle->h_transaction->t_journal);
 }
@@ -1265,17 +1296,38 @@ static inline int tid_geq(tid_t x, tid_t y)
 extern int jbd2_journal_blocks_per_page(struct inode *inode);
 extern size_t journal_tag_bytes(journal_t *journal);
 
+/*
+ * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
+ * transaction control blocks.
+ */
+#define JBD2_CONTROL_BLOCKS_SHIFT 5
+
 /*
  * Return the minimum number of blocks which must be free in the journal
  * before a new transaction may be started.  Must be called under j_state_lock.
  */
-static inline int jbd_space_needed(journal_t *journal)
+static inline int jbd2_space_needed(journal_t *journal)
 {
        int nblocks = journal->j_max_transaction_buffers;
-       if (journal->j_committing_transaction)
-               nblocks += atomic_read(&journal->j_committing_transaction->
-                                      t_outstanding_credits);
-       return nblocks;
+       return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
+}
+
+/*
+ * Return number of free blocks in the log. Must be called under j_state_lock.
+ */
+static inline unsigned long jbd2_log_space_left(journal_t *journal)
+{
+       /* Allow for rounding errors */
+       unsigned long free = journal->j_free - 32;
+
+       if (journal->j_committing_transaction) {
+               unsigned long committing = atomic_read(&journal->
+                       j_committing_transaction->t_outstanding_credits);
+
+               /* Transaction + control blocks */
+               free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
+       }
+       return free;
 }
 
 /*
@@ -1286,11 +1338,9 @@ static inline int jbd_space_needed(journal_t *journal)
 #define BJ_None                0       /* Not journaled */
 #define BJ_Metadata    1       /* Normal journaled metadata */
 #define BJ_Forget      2       /* Buffer superseded by this transaction */
-#define BJ_IO          3       /* Buffer is for temporary IO use */
-#define BJ_Shadow      4       /* Buffer contents being shadowed to the log */
-#define BJ_LogCtl      5       /* Buffer contains log descriptors */
-#define BJ_Reserved    6       /* Buffer is reserved for access by journal */
-#define BJ_Types       7
+#define BJ_Shadow      3       /* Buffer contents being shadowed to the log */
+#define BJ_Reserved    4       /* Buffer is reserved for access by journal */
+#define BJ_Types       5
 
 extern int jbd_blocks_per_page(struct inode *inode);
 
@@ -1319,6 +1369,19 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
        return *(u32 *)desc.ctx;
 }
 
+/* Return most recent uncommitted transaction */
+static inline tid_t  jbd2_get_latest_transaction(journal_t *journal)
+{
+       tid_t tid;
+
+       read_lock(&journal->j_state_lock);
+       tid = journal->j_commit_request;
+       if (journal->j_running_transaction)
+               tid = journal->j_running_transaction->t_tid;
+       read_unlock(&journal->j_state_lock);
+       return tid;
+}
+
 #ifdef __KERNEL__
 
 #define buffer_trace_init(bh)  do {} while (0)
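
Among the jbd2 changes above, the reserved-credit API is the main new caller-visible interface: a handle started with a non-zero rsv_blocks carries a second, pre-reserved handle in h_rsv_handle, which is later either activated with jbd2_journal_start_reserved() or returned with jbd2_journal_free_reserved(). A hypothetical sketch using only what this header declares; the exact lifetime rules relative to the parent handle are set by the jbd2 core patches in this series and are not reproduced here:

	#include <linux/err.h>
	#include <linux/jbd2.h>

	static handle_t *myfs_start_with_reservation(journal_t *journal)
	{
		/* 8 credits for the immediate update, 4 parked for a later step */
		handle_t *handle = jbd2__journal_start(journal, 8, 4, GFP_NOFS, 0, 0);

		if (!IS_ERR(handle) && handle->h_rsv_handle) {
			/*
			 * handle->h_rsv_handle holds the parked reservation;
			 * activate it later with jbd2_journal_start_reserved()
			 * or release it with jbd2_journal_free_reserved().
			 */
		}
		return handle;
	}
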
index 6133679bc4c01ace20a0114fd50ff7c3481c7eb9..3dc53432355f5edce8a2793a0f639e8f58480c3e 100644 (file)
@@ -1,31 +1,7 @@
 #ifndef _LINUX_JBD_STATE_H
 #define _LINUX_JBD_STATE_H
 
-enum jbd_state_bits {
-       BH_JBD                  /* Has an attached ext3 journal_head */
-         = BH_PrivateStart,
-       BH_JWrite,              /* Being written to log (@@@ DEBUGGING) */
-       BH_Freed,               /* Has been freed (truncated) */
-       BH_Revoked,             /* Has been revoked from the log */
-       BH_RevokeValid,         /* Revoked flag is valid */
-       BH_JBDDirty,            /* Is dirty but journaled */
-       BH_State,               /* Pins most journal_head state */
-       BH_JournalHead,         /* Pins bh->b_private and jh->b_bh */
-       BH_Unshadow,            /* Dummy bit, for BJ_Shadow wakeup filtering */
-       BH_Verified,            /* Metadata block has been verified ok */
-       BH_JBDPrivateStart,     /* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-BUFFER_FNS(Verified, verified)
+#include <linux/bit_spinlock.h>
 
 static inline struct buffer_head *jh2bh(struct journal_head *jh)
 {
index f0eea07d2c2bb5168820443639af6e85a9d47850..8db53cfaccdb64fdbd18d29935e698d101bf5bf1 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/ratelimit.h>
 #include <linux/err.h>
 #include <linux/irqflags.h>
+#include <linux/context_tracking.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -760,42 +761,6 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
 }
 #endif
 
-static inline void __guest_enter(void)
-{
-       /*
-        * This is running in ioctl context so we can avoid
-        * the call to vtime_account() with its unnecessary idle check.
-        */
-       vtime_account_system(current);
-       current->flags |= PF_VCPU;
-}
-
-static inline void __guest_exit(void)
-{
-       /*
-        * This is running in ioctl context so we can avoid
-        * the call to vtime_account() with its unnecessary idle check.
-        */
-       vtime_account_system(current);
-       current->flags &= ~PF_VCPU;
-}
-
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void guest_enter(void);
-extern void guest_exit(void);
-
-#else /* !CONFIG_CONTEXT_TRACKING */
-static inline void guest_enter(void)
-{
-       __guest_enter();
-}
-
-static inline void guest_exit(void)
-{
-       __guest_exit();
-}
-#endif /* !CONFIG_CONTEXT_TRACKING */
-
 static inline void kvm_guest_enter(void)
 {
        unsigned long flags;
diff --git a/include/linux/loop.h b/include/linux/loop.h
deleted file mode 100644 (file)
index 460b60f..0000000
--- a/include/linux/loop.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * include/linux/loop.h
- *
- * Written by Theodore Ts'o, 3/29/93.
- *
- * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
- * permitted under the GNU General Public License.
- */
-#ifndef _LINUX_LOOP_H
-#define _LINUX_LOOP_H
-
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <uapi/linux/loop.h>
-
-/* Possible states of device */
-enum {
-       Lo_unbound,
-       Lo_bound,
-       Lo_rundown,
-};
-
-struct loop_func_table;
-
-struct loop_device {
-       int             lo_number;
-       int             lo_refcnt;
-       loff_t          lo_offset;
-       loff_t          lo_sizelimit;
-       int             lo_flags;
-       int             (*transfer)(struct loop_device *, int cmd,
-                                   struct page *raw_page, unsigned raw_off,
-                                   struct page *loop_page, unsigned loop_off,
-                                   int size, sector_t real_block);
-       char            lo_file_name[LO_NAME_SIZE];
-       char            lo_crypt_name[LO_NAME_SIZE];
-       char            lo_encrypt_key[LO_KEY_SIZE];
-       int             lo_encrypt_key_size;
-       struct loop_func_table *lo_encryption;
-       __u32           lo_init[2];
-       kuid_t          lo_key_owner;   /* Who set the key */
-       int             (*ioctl)(struct loop_device *, int cmd, 
-                                unsigned long arg); 
-
-       struct file *   lo_backing_file;
-       struct block_device *lo_device;
-       unsigned        lo_blocksize;
-       void            *key_data; 
-
-       gfp_t           old_gfp_mask;
-
-       spinlock_t              lo_lock;
-       struct bio_list         lo_bio_list;
-       unsigned int            lo_bio_count;
-       int                     lo_state;
-       struct mutex            lo_ctl_mutex;
-       struct task_struct      *lo_thread;
-       wait_queue_head_t       lo_event;
-       /* wait queue for incoming requests */
-       wait_queue_head_t       lo_req_wait;
-
-       struct request_queue    *lo_queue;
-       struct gendisk          *lo_disk;
-};
-
-/* Support for loadable transfer modules */
-struct loop_func_table {
-       int number;     /* filter type */ 
-       int (*transfer)(struct loop_device *lo, int cmd,
-                       struct page *raw_page, unsigned raw_off,
-                       struct page *loop_page, unsigned loop_off,
-                       int size, sector_t real_block);
-       int (*init)(struct loop_device *, const struct loop_info64 *); 
-       /* release is called from loop_unregister_transfer or clr_fd */
-       int (*release)(struct loop_device *); 
-       int (*ioctl)(struct loop_device *, int cmd, unsigned long arg);
-       struct module *owner;
-}; 
-
-int loop_register_transfer(struct loop_func_table *funcs);
-int loop_unregister_transfer(int number); 
-
-#endif
index e0c8528a41a4d4a278fe736a46341755d46c5479..66d881f1d57665b5791d3a5a357c8cbff3d4c9a0 100644 (file)
@@ -1041,7 +1041,8 @@ int get_kernel_page(unsigned long start, int write, struct page **pages);
 struct page *get_dump_page(unsigned long addr);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
-extern void do_invalidatepage(struct page *page, unsigned long offset);
+extern void do_invalidatepage(struct page *page, unsigned int offset,
+                             unsigned int length);
 
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
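
The invalidatepage path now carries a length as well as an offset, so a partial-page punch-hole can be told apart from whole-page truncation. A minimal sketch of a filesystem hook built on the new pair (not part of the patch; example_release_private() and example_zero_range() are hypothetical helpers):

	static void example_invalidatepage(struct page *page, unsigned int offset,
					   unsigned int length)
	{
		if (offset == 0 && length == PAGE_CACHE_SIZE)
			example_release_private(page);		/* whole page goes away */
		else
			example_zero_range(page, offset, length); /* partial punch */
	}
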
index 60584b185a0c5d4ad61ed0435982ad4287dea8fc..96e4c21e15e04e3529021d2e244c41b14d61d05b 100644 (file)
@@ -1695,6 +1695,7 @@ extern int                init_dummy_netdev(struct net_device *dev);
 extern struct net_device       *dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device       *__dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device       *dev_get_by_index_rcu(struct net *net, int ifindex);
+extern int             netdev_get_name(struct net *net, char *name, int ifindex);
 extern int             dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
 extern int             netpoll_trap(void);
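
netdev_get_name() copies the current name of the device with the given ifindex. A usage sketch (not from this merge), assuming the usual 0-on-success return convention:

	static void example_report_ifindex(struct net *net, int ifindex)
	{
		char name[IFNAMSIZ];

		if (netdev_get_name(net, name, ifindex) == 0)
			pr_info("ifindex %d is currently %s\n", ifindex, name);
	}
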
index f463a46424e240715f2cc0c3bbf58f38955db1a8..c5b6dbf9c2fcd5c90d0b16d2e4e6e330af3ed0b0 100644 (file)
@@ -389,8 +389,7 @@ struct perf_event {
        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
-       int                             mmap_locked;
-       struct user_struct              *mmap_user;
+
        struct ring_buffer              *rb;
        struct list_head                rb_entry;
 
index 85dcc709f7e92007c0a5d753c3efc3cea443f0b8..1d1b6ef871f6ace33196cc893a1c8b45d1375b40 100644 (file)
@@ -3,6 +3,10 @@
 
 #define USB3503_I2C_NAME       "usb3503"
 
+#define USB3503_OFF_PORT1      (1 << 1)
+#define USB3503_OFF_PORT2      (1 << 2)
+#define USB3503_OFF_PORT3      (1 << 3)
+
 enum usb3503_mode {
        USB3503_MODE_UNKNOWN,
        USB3503_MODE_HUB,
@@ -11,6 +15,7 @@ enum usb3503_mode {
 
 struct usb3503_platform_data {
        enum usb3503_mode       initial_mode;
+       u8      port_off_mask;
        int     gpio_intn;
        int     gpio_connect;
        int     gpio_reset;
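
The new port_off_mask lets board code keep unwired hub ports powered down. An illustrative platform-data block (GPIO values are placeholders, not taken from any real board file):

	static struct usb3503_platform_data example_usb3503_pdata = {
		.initial_mode	= USB3503_MODE_HUB,
		.port_off_mask	= USB3503_OFF_PORT2,	/* port 2 not wired up */
		.gpio_intn	= -1,
		.gpio_connect	= -1,
		.gpio_reset	= -1,
	};
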
index 87a03c746f177be0b4ddc0dee6a61860d596ff3d..f5d4723cdb3d3e6ab30d5af02d1eaadbdc7d0ee3 100644 (file)
@@ -33,9 +33,25 @@ do { \
                preempt_schedule(); \
 } while (0)
 
+#ifdef CONFIG_CONTEXT_TRACKING
+
+void preempt_schedule_context(void);
+
+#define preempt_check_resched_context() \
+do { \
+       if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+               preempt_schedule_context(); \
+} while (0)
+#else
+
+#define preempt_check_resched_context() preempt_check_resched()
+
+#endif /* CONFIG_CONTEXT_TRACKING */
+
 #else /* !CONFIG_PREEMPT */
 
 #define preempt_check_resched()                do { } while (0)
+#define preempt_check_resched_context()        do { } while (0)
 
 #endif /* CONFIG_PREEMPT */
 
@@ -88,7 +104,7 @@ do { \
 do { \
        preempt_enable_no_resched_notrace(); \
        barrier(); \
-       preempt_check_resched(); \
+       preempt_check_resched_context(); \
 } while (0)
 
 #else /* !CONFIG_PREEMPT_COUNT */
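
With both preemption and context tracking enabled, preempt_enable_notrace() therefore effectively expands to the following (sketch assembled from the hunks above):

	preempt_enable_no_resched_notrace();
	barrier();
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
		preempt_schedule_context();	/* leaves user context before scheduling */
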
index 87d4bbc773fc7d8d7223423c9d388accc32c527c..b98291ac7f141f0b0cf33b569d7dde0796b60f8c 100644 (file)
 #include <linux/sysrq.h>
 #include <uapi/linux/serial_core.h>
 
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#define uart_console(port) \
+       ((port)->cons && (port)->cons->index == (port)->line)
+#else
+#define uart_console(port)      (0)
+#endif
+
 struct uart_port;
 struct serial_struct;
 struct device;
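
uart_console() reports whether a port is backing the system console; it was previously open-coded in several serial drivers. A driver-side sketch (the function and its policy are hypothetical):

	static int example_uart_runtime_suspend(struct uart_port *port)
	{
		if (uart_console(port))
			return 0;	/* never power down the console port */

		/* ... gate clocks, drop the port into low power ... */
		return 0;
	}
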
index 9c676eae396867fbcc656bc9466fe19459f31b2f..dec1748cd00225b5046cb58096ef08305816a3f1 100644 (file)
@@ -627,6 +627,7 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 }
 
 extern void kfree_skb(struct sk_buff *skb);
+extern void kfree_skb_list(struct sk_buff *segs);
 extern void skb_tx_error(struct sk_buff *skb);
 extern void consume_skb(struct sk_buff *skb);
 extern void           __kfree_skb(struct sk_buff *skb);
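
kfree_skb_list() frees every skb linked through ->next in one call. A sketch of the open-coded loop it replaces:

	static void example_drop_segs(struct sk_buff *segs)
	{
		/* replaces:
		 *	while (segs) { next = segs->next; kfree_skb(segs); segs = next; }
		 */
		kfree_skb_list(segs);
	}
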
index 09a545a7dfa39bcd7f736f446358d4c3b112aae4..74575cbf2d6f579c317fec5f1f16e9d3793fab03 100644 (file)
@@ -35,6 +35,7 @@ struct splice_desc {
                void *data;             /* cookie */
        } u;
        loff_t pos;                     /* file position */
+       loff_t *opos;                   /* sendfile: output position */
        size_t num_spliced;             /* number of bytes already spliced */
        bool need_wakeup;               /* need to wake up writer */
 };
index 8780bd2a272ab6672c8f6c32340b7d1ab2831acb..01ac30efd6a6fbe649a97185c745e48357a47579 100644 (file)
@@ -272,7 +272,6 @@ struct tty_struct {
 #define N_TTY_BUF_SIZE 4096
 
        unsigned char closing:1;
-       unsigned short minimum_to_wake;
        unsigned char *write_buf;
        int write_cnt;
        /* If the tty has a pending do_SAK, queue it here - akpm */
@@ -309,8 +308,6 @@ struct tty_file_private {
 #define TTY_LDISC              9       /* Line discipline attached */
 #define TTY_LDISC_CHANGING     10      /* Line discipline changing */
 #define TTY_LDISC_OPEN         11      /* Line discipline is open */
-#define TTY_HW_COOK_OUT        14      /* Hardware can do output cooking */
-#define TTY_HW_COOK_IN                 15      /* Hardware can do input cooking */
 #define TTY_PTY_LOCK           16      /* pty private */
 #define TTY_NO_WRITE_SPLIT     17      /* Preserve write boundaries to driver */
 #define TTY_HUPPED             18      /* Post driver->hangup() */
index 58390c73df8b69ae146ce466af4e24ccadce2cdf..a1b0489998212d8db2bf831145da0bb87f4ee594 100644 (file)
  *     seek to perform this action quickly but should wait until
  *     any pending driver I/O is completed.
  *
+ * void (*fasync)(struct tty_struct *, int on)
+ *
+ *     Notify line discipline when signal-driven I/O is enabled or
+ *     disabled.
+ *
  * void (*dcd_change)(struct tty_struct *tty, unsigned int status)
  *
  *     Tells the discipline that the DCD pin has changed its status.
 #include <linux/wait.h>
 #include <linux/wait.h>
 
+
+/*
+ * the semaphore definition
+ */
+struct ld_semaphore {
+       long                    count;
+       raw_spinlock_t          wait_lock;
+       unsigned int            wait_readers;
+       struct list_head        read_wait;
+       struct list_head        write_wait;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
+};
+
+extern void __init_ldsem(struct ld_semaphore *sem, const char *name,
+                        struct lock_class_key *key);
+
+#define init_ldsem(sem)                                                \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       __init_ldsem((sem), #sem, &__key);                      \
+} while (0)
+
+
+extern int ldsem_down_read(struct ld_semaphore *sem, long timeout);
+extern int ldsem_down_read_trylock(struct ld_semaphore *sem);
+extern int ldsem_down_write(struct ld_semaphore *sem, long timeout);
+extern int ldsem_down_write_trylock(struct ld_semaphore *sem);
+extern void ldsem_up_read(struct ld_semaphore *sem);
+extern void ldsem_up_write(struct ld_semaphore *sem);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass,
+                                 long timeout);
+extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
+                                  long timeout);
+#else
+# define ldsem_down_read_nested(sem, subclass, timeout)                \
+               ldsem_down_read(sem, timeout)
+# define ldsem_down_write_nested(sem, subclass, timeout)       \
+               ldsem_down_write(sem, timeout)
+#endif
+
+
 struct tty_ldisc_ops {
        int     magic;
        char    *name;
@@ -143,6 +194,7 @@ struct tty_ldisc_ops {
                               char *fp, int count);
        void    (*write_wakeup)(struct tty_struct *);
        void    (*dcd_change)(struct tty_struct *, unsigned int);
+       void    (*fasync)(struct tty_struct *tty, int on);
 
        struct  module *owner;
 
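
The ld_semaphore is a reader/writer semaphore with timeouts, meant to serialize line-discipline users against hangup and ldisc changes. A usage sketch (not part of the patch), assuming ldsem_down_read()/ldsem_down_write() return nonzero on success and 0 on timeout:

	static struct ld_semaphore example_ldsem;

	static void example_setup(void)
	{
		init_ldsem(&example_ldsem);
	}

	static void example_reader(void)
	{
		if (!ldsem_down_read(&example_ldsem, MAX_SCHEDULE_TIMEOUT))
			return;				/* timed out */
		/* ... line discipline may be used safely here ... */
		ldsem_up_read(&example_ldsem);
	}
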
index a0bee5a28d1a578a27829edd231968e1ff11ce1d..a232b7ece1f63c687a2f50da506c9e70ec33555e 100644 (file)
@@ -393,6 +393,22 @@ enum usb_port_connect_type {
        USB_PORT_NOT_USED,
 };
 
+/*
+ * USB 2.0 Link Power Management (LPM) parameters.
+ */
+struct usb2_lpm_parameters {
+       /* Best effort service latency indicates how long the host will drive
+        * resume on an exit from L1.
+        */
+       unsigned int besl;
+
+       /* Timeout value in microseconds for the L1 inactivity (LPM) timer.
+        * When the timer counts to zero, the parent hub will initiate a LPM
+        * transition to L1.
+        */
+       int timeout;
+};
+
 /*
  * USB 3.0 Link Power Management (LPM) parameters.
  *
@@ -468,6 +484,7 @@ struct usb3_lpm_parameters {
  * @wusb: device is Wireless USB
  * @lpm_capable: device supports LPM
  * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
+ * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
  * @usb2_hw_lpm_enabled: USB2 hardware LPM enabled
  * @usb3_lpm_enabled: USB3 hardware LPM enabled
  * @string_langid: language ID for strings
@@ -487,6 +504,7 @@ struct usb3_lpm_parameters {
  *     specific data for the device.
  * @slot_id: Slot ID assigned by xHCI
  * @removable: Device can be physically removed from this port
+ * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
+ * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
  * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
  * @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
  * @lpm_disable_count: Ref count used by usb_disable_lpm() and usb_enable_lpm()
@@ -538,6 +556,7 @@ struct usb_device {
        unsigned wusb:1;
        unsigned lpm_capable:1;
        unsigned usb2_hw_lpm_capable:1;
+       unsigned usb2_hw_lpm_besl_capable:1;
        unsigned usb2_hw_lpm_enabled:1;
        unsigned usb3_lpm_enabled:1;
        int string_langid;
@@ -566,6 +585,7 @@ struct usb_device {
        struct wusb_dev *wusb_dev;
        int slot_id;
        enum usb_device_removable removable;
+       struct usb2_lpm_parameters l1_params;
        struct usb3_lpm_parameters u1_params;
        struct usb3_lpm_parameters u2_params;
        unsigned lpm_disable_count;
@@ -717,6 +737,7 @@ const struct usb_device_id *usb_match_id(struct usb_interface *interface,
 extern int usb_match_one_id(struct usb_interface *interface,
                            const struct usb_device_id *id);
 
+extern int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *));
 extern struct usb_interface *usb_find_interface(struct usb_driver *drv,
                int minor);
 extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
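
usb_for_each_dev() walks every USB device in the system and calls the supplied function with the opaque data pointer; a nonzero return from the callback is assumed to stop the walk. A counting sketch (not part of the patch):

	static int example_count_one(struct usb_device *udev, void *data)
	{
		int *count = data;

		(*count)++;
		return 0;		/* keep iterating */
	}

	static int example_count_usb_devices(void)
	{
		int count = 0;

		usb_for_each_dev(&count, example_count_one);
		return count;
	}
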
index 544825dde82305daeb4d7cf156901921ed73efcb..25629948c84280a67cb304b707013bc9f383b503 100644 (file)
@@ -7,32 +7,33 @@
 
 #include <linux/usb/otg.h>
 
-struct ci13xxx;
-struct ci13xxx_platform_data {
+struct ci_hdrc;
+struct ci_hdrc_platform_data {
        const char      *name;
        /* offset of the capability registers */
        uintptr_t        capoffset;
        unsigned         power_budget;
        struct usb_phy  *phy;
+       enum usb_phy_interface phy_mode;
        unsigned long    flags;
-#define CI13XXX_REGS_SHARED            BIT(0)
-#define CI13XXX_REQUIRE_TRANSCEIVER    BIT(1)
-#define CI13XXX_PULLUP_ON_VBUS         BIT(2)
-#define CI13XXX_DISABLE_STREAMING      BIT(3)
-
-#define CI13XXX_CONTROLLER_RESET_EVENT         0
-#define CI13XXX_CONTROLLER_STOPPED_EVENT       1
-       void    (*notify_event) (struct ci13xxx *ci, unsigned event);
+#define CI_HDRC_REGS_SHARED            BIT(0)
+#define CI_HDRC_REQUIRE_TRANSCEIVER    BIT(1)
+#define CI_HDRC_PULLUP_ON_VBUS         BIT(2)
+#define CI_HDRC_DISABLE_STREAMING      BIT(3)
+       enum usb_dr_mode        dr_mode;
+#define CI_HDRC_CONTROLLER_RESET_EVENT         0
+#define CI_HDRC_CONTROLLER_STOPPED_EVENT       1
+       void    (*notify_event) (struct ci_hdrc *ci, unsigned event);
 };
 
 /* Default offset of capability registers */
 #define DEF_CAPOFFSET          0x100
 
-/* Add ci13xxx device */
-struct platform_device *ci13xxx_add_device(struct device *dev,
+/* Add ci hdrc device */
+struct platform_device *ci_hdrc_add_device(struct device *dev,
                        struct resource *res, int nres,
-                       struct ci13xxx_platform_data *platdata);
-/* Remove ci13xxx device */
-void ci13xxx_remove_device(struct platform_device *pdev);
+                       struct ci_hdrc_platform_data *platdata);
+/* Remove ci hdrc device */
+void ci_hdrc_remove_device(struct platform_device *pdev);
 
 #endif
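
A glue-driver sketch against the renamed chipidea interface; the name, flags and reuse of the platform device's own resources are illustrative, and ci_hdrc_add_device() is assumed to return an ERR_PTR() value on failure:

	static struct ci_hdrc_platform_data example_ci_pdata = {
		.name		= "ci_hdrc_example",
		.capoffset	= DEF_CAPOFFSET,
		.dr_mode	= USB_DR_MODE_OTG,
		.flags		= CI_HDRC_REQUIRE_TRANSCEIVER |
				  CI_HDRC_DISABLE_STREAMING,
	};

	static int example_ci_probe(struct platform_device *pdev)
	{
		struct platform_device *ci;

		ci = ci_hdrc_add_device(&pdev->dev, pdev->resource,
					pdev->num_resources, &example_ci_pdata);
		if (IS_ERR(ci))
			return PTR_ERR(ci);

		platform_set_drvdata(pdev, ci);
		return 0;
	}
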
index f5f5c7dfda90ebe2656176c058672b75854c3176..1e88377e22f4fd6e2bec2be90df8b07eba5d89fc 100644 (file)
@@ -218,6 +218,7 @@ struct hc_driver {
 #define        HCD_SHARED      0x0004          /* Two (or more) usb_hcds share HW */
 #define        HCD_USB11       0x0010          /* USB 1.1 */
 #define        HCD_USB2        0x0020          /* USB 2.0 */
+#define        HCD_USB25       0x0030          /* Wireless USB 1.0 (USB 2.5)*/
 #define        HCD_USB3        0x0040          /* USB 3.0 */
 #define        HCD_MASK        0x0070
 
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
new file mode 100644 (file)
index 0000000..a0ef405
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * OF helpers for usb devices.
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_USB_OF_H
+#define __LINUX_USB_OF_H
+
+#include <linux/usb/otg.h>
+#include <linux/usb/phy.h>
+
+#if IS_ENABLED(CONFIG_OF)
+enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
+#else
+static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
+{
+       return USB_DR_MODE_UNKNOWN;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_PHY)
+enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np);
+#else
+static inline enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np)
+{
+       return USBPHY_INTERFACE_MODE_UNKNOWN;
+}
+
+#endif
+
+#endif /* __LINUX_USB_OF_H */
index 291e01ba32e5b80ba49be153ad98523fc220954e..154332b7c8c0d165bd94806c818f666e16e5c9d4 100644 (file)
@@ -92,4 +92,11 @@ otg_start_srp(struct usb_otg *otg)
 /* for OTG controller drivers (and maybe other stuff) */
 extern int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num);
 
+enum usb_dr_mode {
+       USB_DR_MODE_UNKNOWN,
+       USB_DR_MODE_HOST,
+       USB_DR_MODE_PERIPHERAL,
+       USB_DR_MODE_OTG,
+};
+
 #endif /* __LINUX_USB_OTG_H */
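
Together with the new enum, of_usb_get_dr_mode() lets dual-role controller drivers pick their role from the device tree. A sketch (not part of the patch) that falls back to OTG when the property is absent and USB_DR_MODE_UNKNOWN is returned:

	static enum usb_dr_mode example_get_dr_mode(struct platform_device *pdev)
	{
		enum usb_dr_mode mode = of_usb_get_dr_mode(pdev->dev.of_node);

		return mode == USB_DR_MODE_UNKNOWN ? USB_DR_MODE_OTG : mode;
	}
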
index 6b5978f576331bcfc9e9411211ab82b38f91f4a2..44036808bf0fdd1c2d9fd126ad2616536d3f23a1 100644 (file)
 #include <linux/notifier.h>
 #include <linux/usb.h>
 
+enum usb_phy_interface {
+       USBPHY_INTERFACE_MODE_UNKNOWN,
+       USBPHY_INTERFACE_MODE_UTMI,
+       USBPHY_INTERFACE_MODE_UTMIW,
+       USBPHY_INTERFACE_MODE_ULPI,
+       USBPHY_INTERFACE_MODE_SERIAL,
+       USBPHY_INTERFACE_MODE_HSIC,
+};
+
 enum usb_phy_events {
        USB_EVENT_NONE,         /* no events or cable disconnected */
        USB_EVENT_VBUS,         /* vbus valid event */
index 302ddf55d2daca5d74291b1eb9aaa2b4127fe7d2..d528b804515081f9d5388dc565822241cfd6adc9 100644 (file)
 #include <linux/sysrq.h>
 #include <linux/kfifo.h>
 
-#define SERIAL_TTY_MAJOR       188     /* Nice legal number now */
-#define SERIAL_TTY_MINORS      254     /* loads of devices :) */
-#define SERIAL_TTY_NO_MINOR    255     /* No minor was assigned */
-
 /* The maximum number of ports one device can grab at once */
 #define MAX_NUM_PORTS          8
 
@@ -37,7 +33,8 @@
  * @serial: pointer back to the struct usb_serial owner of this port.
  * @port: pointer to the corresponding tty_port for this port.
  * @lock: spinlock to grab when updating portions of this structure.
- * @number: the number of the port (the minor number).
+ * @minor: the minor number of the port
+ * @port_number: the struct usb_serial port number of this port (starts at 0)
  * @interrupt_in_buffer: pointer to the interrupt in buffer for this port.
  * @interrupt_in_urb: pointer to the interrupt in struct urb for this port.
  * @interrupt_in_endpointAddress: endpoint address for the interrupt in pipe
@@ -80,7 +77,8 @@ struct usb_serial_port {
        struct usb_serial       *serial;
        struct tty_port         port;
        spinlock_t              lock;
-       unsigned char           number;
+       u32                     minor;
+       u8                      port_number;
 
        unsigned char           *interrupt_in_buffer;
        struct urb              *interrupt_in_urb;
@@ -140,7 +138,6 @@ static inline void usb_set_serial_port_data(struct usb_serial_port *port,
  * @dev: pointer to the struct usb_device for this device
  * @type: pointer to the struct usb_serial_driver for this device
  * @interface: pointer to the struct usb_interface for this device
- * @minor: the starting minor number for this device
  * @num_ports: the number of ports this device has
  * @num_interrupt_in: number of interrupt in endpoints we have
  * @num_interrupt_out: number of interrupt out endpoints we have
@@ -159,7 +156,7 @@ struct usb_serial {
        unsigned char                   disconnected:1;
        unsigned char                   suspending:1;
        unsigned char                   attached:1;
-       unsigned char                   minor;
+       unsigned char                   minors_reserved:1;
        unsigned char                   num_ports;
        unsigned char                   num_port_pointers;
        char                            num_interrupt_in;
@@ -319,7 +316,7 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {}
 #endif
 
 /* Functions needed by other parts of the usbserial core */
-extern struct usb_serial *usb_serial_get_by_index(unsigned int minor);
+extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor);
 extern void usb_serial_put(struct usb_serial *serial);
 extern int usb_serial_generic_open(struct tty_struct *tty,
        struct usb_serial_port *port);
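
With the minor number decoupled from the per-device port index, drivers use port->minor for the tty minor and port->port_number for the 0-based index within the usb_serial device. A logging sketch (hypothetical function):

	static void example_log_port(struct usb_serial_port *port)
	{
		dev_dbg(&port->dev, "tty minor %u, port %u of this device\n",
			port->minor, port->port_number);
	}
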
index 1b7519a8c0bf5e509e90fa0b5827713b0aaf219a..d2ca919a5b738387b28293918ce9d2e47ccc053c 100644 (file)
@@ -42,6 +42,7 @@ enum tegra_usb_phy_port_speed {
 enum tegra_usb_phy_mode {
        TEGRA_USB_PHY_MODE_DEVICE,
        TEGRA_USB_PHY_MODE_HOST,
+       TEGRA_USB_PHY_MODE_OTG,
 };
 
 struct tegra_xtal_freq;
@@ -61,14 +62,10 @@ struct tegra_usb_phy {
        struct device *dev;
        bool is_legacy_phy;
        bool is_ulpi_phy;
-       void (*set_pts)(struct usb_phy *x, u8 pts_val);
-       void (*set_phcd)(struct usb_phy *x, bool enable);
+       int reset_gpio;
 };
 
-struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
-       void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode,
-       void (*set_pts)(struct usb_phy *x, u8 pts_val),
-       void (*set_phcd)(struct usb_phy *x, bool enable));
+struct usb_phy *tegra_usb_get_phy(struct device_node *dn);
 
 void tegra_usb_phy_preresume(struct usb_phy *phy);
 
index f9dec37f617bf8c1f4af017e9ac1f55459e23c36..6be985b2a4342fc7ebabc1df2245cb3460aa71cb 100644 (file)
@@ -92,11 +92,20 @@ struct usb_rpipe_descriptor {
        __le16  wRPipeIndex;
        __le16  wRequests;
        __le16  wBlocks;                /* rw if 0 */
-       __le16  wMaxPacketSize;         /* rw? */
-       u8      bHSHubAddress;          /* reserved: 0 */
-       u8      bHSHubPort;             /* ??? FIXME ??? */
+       __le16  wMaxPacketSize;         /* rw */
+       union {
+               u8      dwa_bHSHubAddress;              /* rw: DWA. */
+               u8      hwa_bMaxBurst;                  /* rw: HWA. */
+       };
+       union {
+               u8      dwa_bHSHubPort;         /*  rw: DWA. */
+               u8      hwa_bDeviceInfoIndex;   /*  rw: HWA. */
+       };
        u8      bSpeed;                 /* rw: xfer rate 'enum uwb_phy_rate' */
-       u8      bDeviceAddress;         /* rw: Target device address */
+       union {
+               u8 dwa_bDeviceAddress;  /* rw: DWA Target device address. */
+               u8 hwa_reserved;                /* rw: HWA. */
+       };
        u8      bEndpointAddress;       /* rw: Target EP address */
        u8      bDataSequence;          /* ro: Current Data sequence */
        __le32  dwCurrentWindow;        /* ro */
index 0d33fca487748916faca92cad918c3726f713a57..8d7634247fb4e884b65b1236a858ac8cd62477f0 100644 (file)
@@ -133,8 +133,6 @@ void change_console(struct vc_data *new_vc);
 void reset_vc(struct vc_data *vc);
 extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
                             int deflt);
-extern int unbind_con_driver(const struct consw *csw, int first, int last,
-                            int deflt);
 int vty_init(const struct file_operations *console_fops);
 
 static inline bool vt_force_oops_output(struct vc_data *vc)
index 71a5782d8c592fc027cd1e0ce7c4c5a99787c11f..b1dd2db80076d87d77236b76e9314557a185ae5c 100644 (file)
@@ -34,7 +34,7 @@ static inline void vtime_user_exit(struct task_struct *tsk)
 }
 extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
-extern void vtime_init_idle(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 #else
 static inline void vtime_account_irq_exit(struct task_struct *tsk)
 {
@@ -45,7 +45,7 @@ static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
 static inline void vtime_guest_enter(struct task_struct *tsk) { }
 static inline void vtime_guest_exit(struct task_struct *tsk) { }
-static inline void vtime_init_idle(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
index 1133695eb0671d7aeb7f9140bf2b6adc3bc7483d..f487a4750b7f36e97acef1542cb6076d76ce7a59 100644 (file)
@@ -23,6 +23,7 @@ struct __wait_queue {
 struct wait_bit_key {
        void *flags;
        int bit_nr;
+#define WAIT_ATOMIC_T_BIT_NR -1
 };
 
 struct wait_bit_queue {
@@ -60,6 +61,9 @@ struct task_struct;
 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)                          \
        { .flags = word, .bit_nr = bit, }
 
+#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                             \
+       { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
+
 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
 
 #define init_waitqueue_head(q)                         \
@@ -146,8 +150,10 @@ void __wake_up_bit(wait_queue_head_t *, void *, int);
 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
 void wake_up_bit(void *, int);
+void wake_up_atomic_t(atomic_t *);
 int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
 int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
+int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
 wait_queue_head_t *bit_waitqueue(void *, int);
 
 #define wake_up(x)                     __wake_up(x, TASK_NORMAL, 1, NULL)
@@ -902,5 +908,23 @@ static inline int wait_on_bit_lock(void *word, int bit,
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, action, mode);
 }
+
+/**
+ * wait_on_atomic_t - Wait for an atomic_t to become 0
+ * @val: The atomic value being waited on, a kernel virtual address
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
+ * the purpose of getting a waitqueue, but we set the key to a bit number
+ * outside of the target 'word'.
+ */
+static inline
+int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
+{
+       if (atomic_read(val) == 0)
+               return 0;
+       return out_of_line_wait_on_atomic_t(val, action, mode);
+}
        
 #endif
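
A sketch of the intended pattern for the new atomic_t waiters: the final put wakes any waiter, and the drain side sleeps until the count reaches zero. struct example_obj and its users field are hypothetical:

	struct example_obj {			/* hypothetical */
		atomic_t users;
	};

	static int example_wait_atomic_t(atomic_t *p)
	{
		schedule();
		return 0;
	}

	static void example_put(struct example_obj *obj)
	{
		if (atomic_dec_and_test(&obj->users))
			wake_up_atomic_t(&obj->users);
	}

	static void example_drain(struct example_obj *obj)
	{
		wait_on_atomic_t(&obj->users, example_wait_atomic_t,
				 TASK_UNINTERRUPTIBLE);
	}
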
index 579a5007c696fc5b9fd95cd326470b70aa096456..abfe11787af318a409c1a95b01b7a6db1ff4acb3 100644 (file)
@@ -78,6 +78,7 @@ struct writeback_control {
        unsigned tagged_writepages:1;   /* tag-and-write to avoid livelock */
        unsigned for_reclaim:1;         /* Invoked from the page allocator */
        unsigned range_cyclic:1;        /* range_start is cyclic */
+       unsigned for_sync:1;            /* sync(2) WB_SYNC_ALL writeback */
 };
 
 /*
index d3eef01da648d49387d6efe41bb2cae66bd6dac6..0f4555b2a31bdfc4a499df23f3b77de6a95769a2 100644 (file)
@@ -110,6 +110,8 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf);
 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf);
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+                        struct v4l2_create_buffers *create);
 
 int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_exportbuffer *eb);
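
v4l2_m2m_create_bufs() fills the remaining gap in the mem2mem ioctl helpers; a driver wires it up like the existing qbuf/dqbuf wrappers. A sketch with a hypothetical driver context:

	struct example_ctx {			/* hypothetical driver context */
		struct v4l2_m2m_ctx *m2m_ctx;
	};

	static int example_vidioc_create_bufs(struct file *file, void *priv,
					      struct v4l2_create_buffers *create)
	{
		struct example_ctx *ctx = priv;

		return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
	}
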
index 15d11a39be47d9b00b0890ca4dab2c3a93b4fe96..6797b9de90edeac929c5c234558434cc16d1d877 100644 (file)
@@ -290,13 +290,14 @@ DEFINE_EVENT(ext3__page_op, ext3_releasepage,
 );
 
 TRACE_EVENT(ext3_invalidatepage,
-       TP_PROTO(struct page *page, unsigned long offset),
+       TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
 
-       TP_ARGS(page, offset),
+       TP_ARGS(page, offset, length),
 
        TP_STRUCT__entry(
                __field(        pgoff_t, index                  )
-               __field(        unsigned long, offset           )
+               __field(        unsigned int, offset            )
+               __field(        unsigned int, length            )
                __field(        ino_t,  ino                     )
                __field(        dev_t,  dev                     )
 
@@ -305,14 +306,15 @@ TRACE_EVENT(ext3_invalidatepage,
        TP_fast_assign(
                __entry->index  = page->index;
                __entry->offset = offset;
+               __entry->length = length;
                __entry->ino    = page->mapping->host->i_ino;
                __entry->dev    = page->mapping->host->i_sb->s_dev;
        ),
 
-       TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
+       TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->index, __entry->offset)
+                 __entry->index, __entry->offset, __entry->length)
 );
 
 TRACE_EVENT(ext3_discard_blocks,
index 8ee15b97cd389113859e614e6135a8e1eb6916b5..2068db241f2204a44018f8af22225a952dd58c29 100644 (file)
@@ -19,6 +19,57 @@ struct extent_status;
 
 #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
 
+#define show_mballoc_flags(flags) __print_flags(flags, "|",    \
+       { EXT4_MB_HINT_MERGE,           "HINT_MERGE" },         \
+       { EXT4_MB_HINT_RESERVED,        "HINT_RESV" },          \
+       { EXT4_MB_HINT_METADATA,        "HINT_MDATA" },         \
+       { EXT4_MB_HINT_FIRST,           "HINT_FIRST" },         \
+       { EXT4_MB_HINT_BEST,            "HINT_BEST" },          \
+       { EXT4_MB_HINT_DATA,            "HINT_DATA" },          \
+       { EXT4_MB_HINT_NOPREALLOC,      "HINT_NOPREALLOC" },    \
+       { EXT4_MB_HINT_GROUP_ALLOC,     "HINT_GRP_ALLOC" },     \
+       { EXT4_MB_HINT_GOAL_ONLY,       "HINT_GOAL_ONLY" },     \
+       { EXT4_MB_HINT_TRY_GOAL,        "HINT_TRY_GOAL" },      \
+       { EXT4_MB_DELALLOC_RESERVED,    "DELALLOC_RESV" },      \
+       { EXT4_MB_STREAM_ALLOC,         "STREAM_ALLOC" },       \
+       { EXT4_MB_USE_ROOT_BLOCKS,      "USE_ROOT_BLKS" },      \
+       { EXT4_MB_USE_RESERVED,         "USE_RESV" })
+
+#define show_map_flags(flags) __print_flags(flags, "|",                        \
+       { EXT4_GET_BLOCKS_CREATE,               "CREATE" },             \
+       { EXT4_GET_BLOCKS_UNINIT_EXT,           "UNINIT" },             \
+       { EXT4_GET_BLOCKS_DELALLOC_RESERVE,     "DELALLOC" },           \
+       { EXT4_GET_BLOCKS_PRE_IO,               "PRE_IO" },             \
+       { EXT4_GET_BLOCKS_CONVERT,              "CONVERT" },            \
+       { EXT4_GET_BLOCKS_METADATA_NOFAIL,      "METADATA_NOFAIL" },    \
+       { EXT4_GET_BLOCKS_NO_NORMALIZE,         "NO_NORMALIZE" },       \
+       { EXT4_GET_BLOCKS_KEEP_SIZE,            "KEEP_SIZE" },          \
+       { EXT4_GET_BLOCKS_NO_LOCK,              "NO_LOCK" },            \
+       { EXT4_GET_BLOCKS_NO_PUT_HOLE,          "NO_PUT_HOLE" })
+
+#define show_mflags(flags) __print_flags(flags, "",    \
+       { EXT4_MAP_NEW,         "N" },                  \
+       { EXT4_MAP_MAPPED,      "M" },                  \
+       { EXT4_MAP_UNWRITTEN,   "U" },                  \
+       { EXT4_MAP_BOUNDARY,    "B" },                  \
+       { EXT4_MAP_UNINIT,      "u" },                  \
+       { EXT4_MAP_FROM_CLUSTER, "C" })
+
+#define show_free_flags(flags) __print_flags(flags, "|",       \
+       { EXT4_FREE_BLOCKS_METADATA,            "METADATA" },   \
+       { EXT4_FREE_BLOCKS_FORGET,              "FORGET" },     \
+       { EXT4_FREE_BLOCKS_VALIDATED,           "VALIDATED" },  \
+       { EXT4_FREE_BLOCKS_NO_QUOT_UPDATE,      "NO_QUOTA" },   \
+       { EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER,"1ST_CLUSTER" },\
+       { EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER, "LAST_CLUSTER" })
+
+#define show_extent_status(status) __print_flags(status, "",   \
+       { (1 << 3),     "W" },                                  \
+       { (1 << 2),     "U" },                                  \
+       { (1 << 1),     "D" },                                  \
+       { (1 << 0),     "H" })
+
+
 TRACE_EVENT(ext4_free_inode,
        TP_PROTO(struct inode *inode),
 
@@ -281,7 +332,7 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
        TP_ARGS(inode, pos, len, copied)
 );
 
-TRACE_EVENT(ext4_da_writepages,
+TRACE_EVENT(ext4_writepages,
        TP_PROTO(struct inode *inode, struct writeback_control *wbc),
 
        TP_ARGS(inode, wbc),
@@ -324,46 +375,62 @@ TRACE_EVENT(ext4_da_writepages,
 );
 
 TRACE_EVENT(ext4_da_write_pages,
-       TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
+       TP_PROTO(struct inode *inode, pgoff_t first_page,
+                struct writeback_control *wbc),
 
-       TP_ARGS(inode, mpd),
+       TP_ARGS(inode, first_page, wbc),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(        ino_t,  ino                     )
-               __field(        __u64,  b_blocknr               )
-               __field(        __u32,  b_size                  )
-               __field(        __u32,  b_state                 )
-               __field(        unsigned long,  first_page      )
-               __field(        int,    io_done                 )
-               __field(        int,    pages_written           )
-               __field(        int,    sync_mode               )
+               __field(      pgoff_t,  first_page              )
+               __field(         long,  nr_to_write             )
+               __field(          int,  sync_mode               )
        ),
 
        TP_fast_assign(
                __entry->dev            = inode->i_sb->s_dev;
                __entry->ino            = inode->i_ino;
-               __entry->b_blocknr      = mpd->b_blocknr;
-               __entry->b_size         = mpd->b_size;
-               __entry->b_state        = mpd->b_state;
-               __entry->first_page     = mpd->first_page;
-               __entry->io_done        = mpd->io_done;
-               __entry->pages_written  = mpd->pages_written;
-               __entry->sync_mode      = mpd->wbc->sync_mode;
+               __entry->first_page     = first_page;
+               __entry->nr_to_write    = wbc->nr_to_write;
+               __entry->sync_mode      = wbc->sync_mode;
        ),
 
-       TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
-                 "first_page %lu io_done %d pages_written %d sync_mode %d",
+       TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld "
+                 "sync_mode %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 __entry->b_blocknr, __entry->b_size,
-                 __entry->b_state, __entry->first_page,
-                 __entry->io_done, __entry->pages_written,
-                 __entry->sync_mode
-                  )
+                 (unsigned long) __entry->ino, __entry->first_page,
+                 __entry->nr_to_write, __entry->sync_mode)
 );
 
-TRACE_EVENT(ext4_da_writepages_result,
+TRACE_EVENT(ext4_da_write_pages_extent,
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map),
+
+       TP_ARGS(inode, map),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u64,  lblk                    )
+               __field(        __u32,  len                     )
+               __field(        __u32,  flags                   )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->lblk           = map->m_lblk;
+               __entry->len            = map->m_len;
+               __entry->flags          = map->m_flags;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %llu len %u flags %s",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->lblk, __entry->len,
+                 show_mflags(__entry->flags))
+);
+
+TRACE_EVENT(ext4_writepages_result,
        TP_PROTO(struct inode *inode, struct writeback_control *wbc,
                        int ret, int pages_written),
 
@@ -444,16 +511,16 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage,
 );
 
 DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
-       TP_PROTO(struct page *page, unsigned long offset),
+       TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
 
-       TP_ARGS(page, offset),
+       TP_ARGS(page, offset, length),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(        ino_t,  ino                     )
                __field(        pgoff_t, index                  )
-               __field(        unsigned long, offset           )
-
+               __field(        unsigned int, offset            )
+               __field(        unsigned int, length            )
        ),
 
        TP_fast_assign(
@@ -461,24 +528,26 @@ DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
                __entry->ino    = page->mapping->host->i_ino;
                __entry->index  = page->index;
                __entry->offset = offset;
+               __entry->length = length;
        ),
 
-       TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
+       TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned long) __entry->index, __entry->offset)
+                 (unsigned long) __entry->index,
+                 __entry->offset, __entry->length)
 );
 
 DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
-       TP_PROTO(struct page *page, unsigned long offset),
+       TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
 
-       TP_ARGS(page, offset)
+       TP_ARGS(page, offset, length)
 );
 
 DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
-       TP_PROTO(struct page *page, unsigned long offset),
+       TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
 
-       TP_ARGS(page, offset)
+       TP_ARGS(page, offset, length)
 );
 
 TRACE_EVENT(ext4_discard_blocks,
@@ -673,10 +742,10 @@ TRACE_EVENT(ext4_request_blocks,
                __entry->flags  = ar->flags;
        ),
 
-       TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
+       TP_printk("dev %d,%d ino %lu flags %s len %u lblk %u goal %llu "
                  "lleft %u lright %u pleft %llu pright %llu ",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino, __entry->flags,
+                 (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags),
                  __entry->len, __entry->logical, __entry->goal,
                  __entry->lleft, __entry->lright, __entry->pleft,
                  __entry->pright)
@@ -715,10 +784,10 @@ TRACE_EVENT(ext4_allocate_blocks,
                __entry->flags  = ar->flags;
        ),
 
-       TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
+       TP_printk("dev %d,%d ino %lu flags %s len %u block %llu lblk %u "
                  "goal %llu lleft %u lright %u pleft %llu pright %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino, __entry->flags,
+                 (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags),
                  __entry->len, __entry->block, __entry->logical,
                  __entry->goal,  __entry->lleft, __entry->lright,
                  __entry->pleft, __entry->pright)
@@ -748,11 +817,11 @@ TRACE_EVENT(ext4_free_blocks,
                __entry->mode           = inode->i_mode;
        ),
 
-       TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
+       TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->mode, __entry->block, __entry->count,
-                 __entry->flags)
+                 show_free_flags(__entry->flags))
 );
 
 TRACE_EVENT(ext4_sync_file_enter,
@@ -903,7 +972,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
        ),
 
        TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
-                 "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
+                 "result %u/%d/%u@%u blks %u grps %u cr %u flags %s "
                  "tail %u broken %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
@@ -914,7 +983,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
                  __entry->result_group, __entry->result_start,
                  __entry->result_len, __entry->result_logical,
                  __entry->found, __entry->groups, __entry->cr,
-                 __entry->flags, __entry->tail,
+                 show_mballoc_flags(__entry->flags), __entry->tail,
                  __entry->buddy ? 1 << __entry->buddy : 0)
 );
 
@@ -1528,10 +1597,10 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
                __entry->flags  = flags;
        ),
 
-       TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
+       TP_printk("dev %d,%d ino %lu lblk %u len %u flags %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->lblk, __entry->len, __entry->flags)
+                 __entry->lblk, __entry->len, show_map_flags(__entry->flags))
 );
 
 DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
@@ -1549,47 +1618,53 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
 );
 
 DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
-       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
+       TP_PROTO(struct inode *inode, unsigned flags, struct ext4_map_blocks *map,
+                int ret),
 
-       TP_ARGS(inode, map, ret),
+       TP_ARGS(inode, flags, map, ret),
 
        TP_STRUCT__entry(
                __field(        dev_t,          dev             )
                __field(        ino_t,          ino             )
+               __field(        unsigned int,   flags           )
                __field(        ext4_fsblk_t,   pblk            )
                __field(        ext4_lblk_t,    lblk            )
                __field(        unsigned int,   len             )
-               __field(        unsigned int,   flags           )
+               __field(        unsigned int,   mflags          )
                __field(        int,            ret             )
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
+               __entry->flags  = flags;
                __entry->pblk   = map->m_pblk;
                __entry->lblk   = map->m_lblk;
                __entry->len    = map->m_len;
-               __entry->flags  = map->m_flags;
+               __entry->mflags = map->m_flags;
                __entry->ret    = ret;
        ),
 
-       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u flags %x ret %d",
+       TP_printk("dev %d,%d ino %lu flags %s lblk %u pblk %llu len %u "
+                 "mflags %s ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->lblk, __entry->pblk,
-                 __entry->len, __entry->flags, __entry->ret)
+                 show_map_flags(__entry->flags), __entry->lblk, __entry->pblk,
+                 __entry->len, show_mflags(__entry->mflags), __entry->ret)
 );
 
 DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
-       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
+       TP_PROTO(struct inode *inode, unsigned flags,
+                struct ext4_map_blocks *map, int ret),
 
-       TP_ARGS(inode, map, ret)
+       TP_ARGS(inode, flags, map, ret)
 );
 
 DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
-       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
+       TP_PROTO(struct inode *inode, unsigned flags,
+                struct ext4_map_blocks *map, int ret),
 
-       TP_ARGS(inode, map, ret)
+       TP_ARGS(inode, flags, map, ret)
 );
 
 TRACE_EVENT(ext4_ext_load_extent,
@@ -1638,25 +1713,50 @@ TRACE_EVENT(ext4_load_inode,
 );
 
 TRACE_EVENT(ext4_journal_start,
-       TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP),
+       TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks,
+                unsigned long IP),
 
-       TP_ARGS(sb, nblocks, IP),
+       TP_ARGS(sb, blocks, rsv_blocks, IP),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(unsigned long,  ip                      )
-               __field(        int,    nblocks                 )
+               __field(          int,  blocks                  )
+               __field(          int,  rsv_blocks              )
        ),
 
        TP_fast_assign(
-               __entry->dev     = sb->s_dev;
-               __entry->ip      = IP;
-               __entry->nblocks = nblocks;
+               __entry->dev             = sb->s_dev;
+               __entry->ip              = IP;
+               __entry->blocks          = blocks;
+               __entry->rsv_blocks      = rsv_blocks;
        ),
 
-       TP_printk("dev %d,%d nblocks %d caller %pF",
+       TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pF",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 __entry->nblocks, (void *)__entry->ip)
+                 __entry->blocks, __entry->rsv_blocks, (void *)__entry->ip)
+);
+
+TRACE_EVENT(ext4_journal_start_reserved,
+       TP_PROTO(struct super_block *sb, int blocks, unsigned long IP),
+
+       TP_ARGS(sb, blocks, IP),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(unsigned long,  ip                      )
+               __field(          int,  blocks                  )
+       ),
+
+       TP_fast_assign(
+               __entry->dev             = sb->s_dev;
+               __entry->ip              = IP;
+               __entry->blocks          = blocks;
+       ),
+
+       TP_printk("dev %d,%d blocks, %d caller %pF",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->blocks, (void *)__entry->ip)
 );
 
 DECLARE_EVENT_CLASS(ext4__trim,
@@ -1736,12 +1836,12 @@ TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
                __entry->newblk         = newblock;
        ),
 
-       TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %x "
+       TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %s "
                  "allocated %d newblock %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
-                 __entry->len, __entry->flags,
+                 __entry->len, show_map_flags(__entry->flags),
                  (unsigned int) __entry->allocated,
                  (unsigned long long) __entry->newblk)
 );
@@ -1769,10 +1869,10 @@ TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
                __entry->ret    = ret;
        ),
 
-       TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
+       TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %s ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->lblk, (unsigned long long) __entry->pblk,
-                 __entry->len, __entry->flags, __entry->ret)
+                 __entry->len, show_mflags(__entry->flags), __entry->ret)
 );
 
 TRACE_EVENT(ext4_ext_put_in_cache,
@@ -1926,7 +2026,7 @@ TRACE_EVENT(ext4_ext_show_extent,
 TRACE_EVENT(ext4_remove_blocks,
            TP_PROTO(struct inode *inode, struct ext4_extent *ex,
                ext4_lblk_t from, ext4_fsblk_t to,
-               ext4_fsblk_t partial_cluster),
+               long long partial_cluster),
 
        TP_ARGS(inode, ex, from, to, partial_cluster),
 
@@ -1935,7 +2035,7 @@ TRACE_EVENT(ext4_remove_blocks,
                __field(        ino_t,          ino     )
                __field(        ext4_lblk_t,    from    )
                __field(        ext4_lblk_t,    to      )
-               __field(        ext4_fsblk_t,   partial )
+               __field(        long long,      partial )
                __field(        ext4_fsblk_t,   ee_pblk )
                __field(        ext4_lblk_t,    ee_lblk )
                __field(        unsigned short, ee_len  )
@@ -1953,7 +2053,7 @@ TRACE_EVENT(ext4_remove_blocks,
        ),
 
        TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
-                 "from %u to %u partial_cluster %u",
+                 "from %u to %u partial_cluster %lld",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  (unsigned) __entry->ee_lblk,
@@ -1961,19 +2061,20 @@ TRACE_EVENT(ext4_remove_blocks,
                  (unsigned short) __entry->ee_len,
                  (unsigned) __entry->from,
                  (unsigned) __entry->to,
-                 (unsigned) __entry->partial)
+                 (long long) __entry->partial)
 );
 
 TRACE_EVENT(ext4_ext_rm_leaf,
        TP_PROTO(struct inode *inode, ext4_lblk_t start,
-                struct ext4_extent *ex, ext4_fsblk_t partial_cluster),
+                struct ext4_extent *ex,
+                long long partial_cluster),
 
        TP_ARGS(inode, start, ex, partial_cluster),
 
        TP_STRUCT__entry(
                __field(        dev_t,          dev     )
                __field(        ino_t,          ino     )
-               __field(        ext4_fsblk_t,   partial )
+               __field(        long long,      partial )
                __field(        ext4_lblk_t,    start   )
                __field(        ext4_lblk_t,    ee_lblk )
                __field(        ext4_fsblk_t,   ee_pblk )
@@ -1991,14 +2092,14 @@ TRACE_EVENT(ext4_ext_rm_leaf,
        ),
 
        TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
-                 "partial_cluster %u",
+                 "partial_cluster %lld",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  (unsigned) __entry->start,
                  (unsigned) __entry->ee_lblk,
                  (unsigned long long) __entry->ee_pblk,
                  (unsigned short) __entry->ee_len,
-                 (unsigned) __entry->partial)
+                 (long long) __entry->partial)
 );
 
 TRACE_EVENT(ext4_ext_rm_idx,
@@ -2025,14 +2126,16 @@ TRACE_EVENT(ext4_ext_rm_idx,
 );
 
 TRACE_EVENT(ext4_ext_remove_space,
-       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
+       TP_PROTO(struct inode *inode, ext4_lblk_t start,
+                ext4_lblk_t end, int depth),
 
-       TP_ARGS(inode, start, depth),
+       TP_ARGS(inode, start, end, depth),
 
        TP_STRUCT__entry(
                __field(        dev_t,          dev     )
                __field(        ino_t,          ino     )
                __field(        ext4_lblk_t,    start   )
+               __field(        ext4_lblk_t,    end     )
                __field(        int,            depth   )
        ),
 
@@ -2040,28 +2143,31 @@ TRACE_EVENT(ext4_ext_remove_space,
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
                __entry->start  = start;
+               __entry->end    = end;
                __entry->depth  = depth;
        ),
 
-       TP_printk("dev %d,%d ino %lu since %u depth %d",
+       TP_printk("dev %d,%d ino %lu since %u end %u depth %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  (unsigned) __entry->start,
+                 (unsigned) __entry->end,
                  __entry->depth)
 );
 
 TRACE_EVENT(ext4_ext_remove_space_done,
-       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
-               ext4_lblk_t partial, __le16 eh_entries),
+       TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end,
+                int depth, long long partial, __le16 eh_entries),
 
-       TP_ARGS(inode, start, depth, partial, eh_entries),
+       TP_ARGS(inode, start, end, depth, partial, eh_entries),
 
        TP_STRUCT__entry(
                __field(        dev_t,          dev             )
                __field(        ino_t,          ino             )
                __field(        ext4_lblk_t,    start           )
+               __field(        ext4_lblk_t,    end             )
                __field(        int,            depth           )
-               __field(        ext4_lblk_t,    partial         )
+               __field(        long long,      partial         )
                __field(        unsigned short, eh_entries      )
        ),
 
@@ -2069,18 +2175,20 @@ TRACE_EVENT(ext4_ext_remove_space_done,
                __entry->dev            = inode->i_sb->s_dev;
                __entry->ino            = inode->i_ino;
                __entry->start          = start;
+               __entry->end            = end;
                __entry->depth          = depth;
                __entry->partial        = partial;
                __entry->eh_entries     = le16_to_cpu(eh_entries);
        ),
 
-       TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
+       TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld "
                  "remaining_entries %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  (unsigned) __entry->start,
+                 (unsigned) __entry->end,
                  __entry->depth,
-                 (unsigned) __entry->partial,
+                 (long long) __entry->partial,
                  (unsigned short) __entry->eh_entries)
 );
 
@@ -2095,7 +2203,7 @@ TRACE_EVENT(ext4_es_insert_extent,
                __field(        ext4_lblk_t,    lblk            )
                __field(        ext4_lblk_t,    len             )
                __field(        ext4_fsblk_t,   pblk            )
-               __field(        unsigned long long, status      )
+               __field(        char, status    )
        ),
 
        TP_fast_assign(
@@ -2104,14 +2212,14 @@ TRACE_EVENT(ext4_es_insert_extent,
                __entry->lblk   = es->es_lblk;
                __entry->len    = es->es_len;
                __entry->pblk   = ext4_es_pblock(es);
-               __entry->status = ext4_es_status(es);
+               __entry->status = ext4_es_status(es) >> 60;
        ),
 
-       TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
+       TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->lblk, __entry->len,
-                 __entry->pblk, __entry->status)
+                 __entry->pblk, show_extent_status(__entry->status))
 );
 
 TRACE_EVENT(ext4_es_remove_extent,
@@ -2172,7 +2280,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
                __field(        ext4_lblk_t,    lblk            )
                __field(        ext4_lblk_t,    len             )
                __field(        ext4_fsblk_t,   pblk            )
-               __field(        unsigned long long, status      )
+               __field(        char, status    )
        ),
 
        TP_fast_assign(
@@ -2181,14 +2289,14 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
                __entry->lblk   = es->es_lblk;
                __entry->len    = es->es_len;
                __entry->pblk   = ext4_es_pblock(es);
-               __entry->status = ext4_es_status(es);
+               __entry->status = ext4_es_status(es) >> 60;
        ),
 
-       TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
+       TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->lblk, __entry->len,
-                 __entry->pblk, __entry->status)
+                 __entry->pblk, show_extent_status(__entry->status))
 );
 
 TRACE_EVENT(ext4_es_lookup_extent_enter,
@@ -2225,7 +2333,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
                __field(        ext4_lblk_t,    lblk            )
                __field(        ext4_lblk_t,    len             )
                __field(        ext4_fsblk_t,   pblk            )
-               __field(        unsigned long long,     status  )
+               __field(        char,           status          )
                __field(        int,            found           )
        ),
 
@@ -2235,16 +2343,16 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
                __entry->lblk   = es->es_lblk;
                __entry->len    = es->es_len;
                __entry->pblk   = ext4_es_pblock(es);
-               __entry->status = ext4_es_status(es);
+               __entry->status = ext4_es_status(es) >> 60;
                __entry->found  = found;
        ),
 
-       TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %llx",
+       TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino, __entry->found,
                  __entry->lblk, __entry->len,
                  __entry->found ? __entry->pblk : 0,
-                 __entry->found ? __entry->status : 0)
+                 show_extent_status(__entry->found ? __entry->status : 0))
 );
 
 TRACE_EVENT(ext4_es_shrink_enter,
index ab5d4992e568f31109c41c505c3d919216a61acb..bdc6e87ff3eb379cf223a54d6976f36ce96e7fab 100644 (file)
@@ -261,6 +261,7 @@ header-y += net_dropmon.h
 header-y += net_tstamp.h
 header-y += netconf.h
 header-y += netdevice.h
+header-y += netlink_diag.h
 header-y += netfilter.h
 header-y += netfilter_arp.h
 header-y += netfilter_bridge.h
index 74c2bf7211f85ed0a55f82987d690cd6bc9aad7c..c8eaeb5465ef76e4ec8992f45e6523a72ebc258c 100644 (file)
 /* Rocketport EXPRESS/INFINITY */
 #define PORT_RP2       102
 
+/* Freescale lpuart */
+#define PORT_LPUART    103
+
 #endif /* _UAPILINUX_SERIAL_CORE_H */
index 65349f07b8782a19f13014e66b8ff8ac0e7a5ecf..383f8231e43676b5c5d3ddcaca51298bd9f42aa3 100644 (file)
@@ -15,7 +15,6 @@
  */
 
 #include <linux/context_tracking.h>
-#include <linux/kvm_host.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/hardirq.h>
@@ -71,6 +70,46 @@ void user_enter(void)
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_PREEMPT
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+void __sched notrace preempt_schedule_context(void)
+{
+       struct thread_info *ti = current_thread_info();
+       enum ctx_state prev_ctx;
+
+       if (likely(ti->preempt_count || irqs_disabled()))
+               return;
+
+       /*
+        * Need to disable preemption in case user_exit() is traced
+        * and the tracer calls preempt_enable_notrace() causing
+        * an infinite recursion.
+        */
+       preempt_disable_notrace();
+       prev_ctx = exception_enter();
+       preempt_enable_no_resched_notrace();
+
+       preempt_schedule();
+
+       preempt_disable_notrace();
+       exception_exit(prev_ctx);
+       preempt_enable_notrace();
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_PREEMPT */
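The kerneldoc above describes the hazard preempt_schedule_context() guards against: a traced preempt enable can land in the scheduler while the context-tracking state still says "user". A rough user-space sketch of that guard shape, with made-up names rather than the kernel API:

#include <stdio.h>

static __thread int preempt_cnt;     /* stands in for ti->preempt_count   */
static __thread int in_user_ctx = 1; /* stands in for the tracked state   */

static void traced_hook(void);

static void maybe_schedule(void)
{
	int prev_ctx;

	if (preempt_cnt)              /* already inside: refuse to recurse  */
		return;

	preempt_cnt++;                /* preempt_disable_notrace()          */
	prev_ctx = in_user_ctx;
	in_user_ctx = 0;              /* exception_enter(): leave "user"    */
	traced_hook();                /* a traced helper may re-enter us    */
	preempt_cnt--;

	puts("schedule() would run here");

	preempt_cnt++;
	in_user_ctx = prev_ctx;       /* exception_exit(prev_ctx)           */
	preempt_cnt--;
}

static void traced_hook(void)
{
	maybe_schedule();             /* cut off by the preempt_cnt check   */
}

int main(void)
{
	maybe_schedule();
	return 0;
}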
 
 /**
  * user_exit - Inform the context tracking that the CPU is
index d5585f5e038ee1080f17777792a6927d5897e5ea..e695c0a0bcb5c84e79ee93e80de57e4fbab69b33 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/cpu.h>
 #include <linux/tick.h>
 #include <linux/mm.h>
+#include <linux/stackprotector.h>
 
 #include <asm/tlb.h>
 
@@ -58,6 +59,7 @@ void __weak arch_cpu_idle_dead(void) { }
 void __weak arch_cpu_idle(void)
 {
        cpu_idle_force_poll = 1;
+       local_irq_enable();
 }
 
 /*
@@ -112,6 +114,21 @@ static void cpu_idle_loop(void)
 
 void cpu_startup_entry(enum cpuhp_state state)
 {
+       /*
+        * This #ifdef needs to die, but it's too late in the cycle to
+        * make this generic (arm and sh have never invoked the canary
+        * init for the non-boot CPUs!). Will be fixed in 3.11
+        */
+#ifdef CONFIG_X86
+       /*
+        * If we're the non-boot CPU, nothing set the stack canary up
+        * for us. The boot CPU already has it initialized but no harm
+        * in doing it again. This is a good place for updating it, as
+        * we won't ever return from this function (so the invalid
+        * canaries already on the stack won't ever trigger).
+        */
+       boot_init_stack_canary();
+#endif
        current_set_polling();
        arch_cpu_idle_prepare();
        cpu_idle_loop();
index 9dc297faf7c01b68cbb1a7a24444b99c300cb8a3..b391907d53520cb4a126bc8e3e0a2d5a64ade54d 100644 (file)
@@ -196,9 +196,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
-static void ring_buffer_attach(struct perf_event *event,
-                              struct ring_buffer *rb);
-
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2918,6 +2915,7 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
 static void free_event(struct perf_event *event)
 {
@@ -2942,15 +2940,30 @@ static void free_event(struct perf_event *event)
                if (has_branch_stack(event)) {
                        static_key_slow_dec_deferred(&perf_sched_events);
                        /* is system-wide event */
-                       if (!(event->attach_state & PERF_ATTACH_TASK))
+                       if (!(event->attach_state & PERF_ATTACH_TASK)) {
                                atomic_dec(&per_cpu(perf_branch_stack_events,
                                                    event->cpu));
+                       }
                }
        }
 
        if (event->rb) {
-               ring_buffer_put(event->rb);
-               event->rb = NULL;
+               struct ring_buffer *rb;
+
+               /*
+                * Can happen when we close an event with re-directed output.
+                *
+                * Since we have a 0 refcount, perf_mmap_close() will skip
+                * over us; possibly making our ring_buffer_put() the last.
+                */
+               mutex_lock(&event->mmap_mutex);
+               rb = event->rb;
+               if (rb) {
+                       rcu_assign_pointer(event->rb, NULL);
+                       ring_buffer_detach(event, rb);
+                       ring_buffer_put(rb); /* could be last */
+               }
+               mutex_unlock(&event->mmap_mutex);
        }
 
        if (is_cgroup_event(event))
@@ -3188,30 +3201,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        unsigned int events = POLL_HUP;
 
        /*
-        * Race between perf_event_set_output() and perf_poll(): perf_poll()
-        * grabs the rb reference but perf_event_set_output() overrides it.
-        * Here is the timeline for two threads T1, T2:
-        * t0: T1, rb = rcu_dereference(event->rb)
-        * t1: T2, old_rb = event->rb
-        * t2: T2, event->rb = new rb
-        * t3: T2, ring_buffer_detach(old_rb)
-        * t4: T1, ring_buffer_attach(rb1)
-        * t5: T1, poll_wait(event->waitq)
-        *
-        * To avoid this problem, we grab mmap_mutex in perf_poll()
-        * thereby ensuring that the assignment of the new ring buffer
-        * and the detachment of the old buffer appear atomic to perf_poll()
+        * Pin the event->rb by taking event->mmap_mutex; otherwise
+        * perf_event_set_output() can swizzle our rb and make us miss wakeups.
         */
        mutex_lock(&event->mmap_mutex);
-
-       rcu_read_lock();
-       rb = rcu_dereference(event->rb);
-       if (rb) {
-               ring_buffer_attach(event, rb);
+       rb = event->rb;
+       if (rb)
                events = atomic_xchg(&rb->poll, 0);
-       }
-       rcu_read_unlock();
-
        mutex_unlock(&event->mmap_mutex);
 
        poll_wait(file, &event->waitq, wait);
@@ -3521,16 +3517,12 @@ static void ring_buffer_attach(struct perf_event *event,
                return;
 
        spin_lock_irqsave(&rb->event_lock, flags);
-       if (!list_empty(&event->rb_entry))
-               goto unlock;
-
-       list_add(&event->rb_entry, &rb->event_list);
-unlock:
+       if (list_empty(&event->rb_entry))
+               list_add(&event->rb_entry, &rb->event_list);
        spin_unlock_irqrestore(&rb->event_lock, flags);
 }
 
-static void ring_buffer_detach(struct perf_event *event,
-                              struct ring_buffer *rb)
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
 {
        unsigned long flags;
 
@@ -3549,13 +3541,10 @@ static void ring_buffer_wakeup(struct perf_event *event)
 
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
-       if (!rb)
-               goto unlock;
-
-       list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
-               wake_up_all(&event->waitq);
-
-unlock:
+       if (rb) {
+               list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+                       wake_up_all(&event->waitq);
+       }
        rcu_read_unlock();
 }
 
@@ -3584,18 +3573,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
-       struct perf_event *event, *n;
-       unsigned long flags;
-
        if (!atomic_dec_and_test(&rb->refcount))
                return;
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
-               list_del_init(&event->rb_entry);
-               wake_up_all(&event->waitq);
-       }
-       spin_unlock_irqrestore(&rb->event_lock, flags);
+       WARN_ON_ONCE(!list_empty(&rb->event_list));
 
        call_rcu(&rb->rcu_head, rb_free_rcu);
 }
@@ -3605,26 +3586,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
        struct perf_event *event = vma->vm_file->private_data;
 
        atomic_inc(&event->mmap_count);
+       atomic_inc(&event->rb->mmap_count);
 }
 
+/*
+ * A buffer can be mmap()ed multiple times; either directly through the same
+ * event, or through other events by use of perf_event_set_output().
+ *
+ * In order to undo the VM accounting done by perf_mmap() we need to destroy
+ * the buffer here, where we still have a VM context. This means we need
+ * to detach all events redirecting to us.
+ */
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
 
-       if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-               unsigned long size = perf_data_size(event->rb);
-               struct user_struct *user = event->mmap_user;
-               struct ring_buffer *rb = event->rb;
+       struct ring_buffer *rb = event->rb;
+       struct user_struct *mmap_user = rb->mmap_user;
+       int mmap_locked = rb->mmap_locked;
+       unsigned long size = perf_data_size(rb);
+
+       atomic_dec(&rb->mmap_count);
+
+       if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+               return;
 
-               atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-               vma->vm_mm->pinned_vm -= event->mmap_locked;
-               rcu_assign_pointer(event->rb, NULL);
-               ring_buffer_detach(event, rb);
+       /* Detach current event from the buffer. */
+       rcu_assign_pointer(event->rb, NULL);
+       ring_buffer_detach(event, rb);
+       mutex_unlock(&event->mmap_mutex);
+
+       /* If there's still other mmap()s of this buffer, we're done. */
+       if (atomic_read(&rb->mmap_count)) {
+               ring_buffer_put(rb); /* can't be last */
+               return;
+       }
+
+       /*
+        * No other mmap()s, detach from all other events that might redirect
+        * into the now unreachable buffer. Somewhat complicated by the
+        * fact that rb::event_lock otherwise nests inside mmap_mutex.
+        */
+again:
+       rcu_read_lock();
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+               if (!atomic_long_inc_not_zero(&event->refcount)) {
+                       /*
+                        * This event is en-route to free_event() which will
+                        * detach it and remove it from the list.
+                        */
+                       continue;
+               }
+               rcu_read_unlock();
+
+               mutex_lock(&event->mmap_mutex);
+               /*
+                * Check we didn't race with perf_event_set_output() which can
+                * swizzle the rb from under us while we were waiting to
+                * acquire mmap_mutex.
+                *
+                * If we find a different rb; ignore this event, a next
+                * iteration will no longer find it on the list. We have to
+                * still restart the iteration to make sure we're not now
+                * iterating the wrong list.
+                */
+               if (event->rb == rb) {
+                       rcu_assign_pointer(event->rb, NULL);
+                       ring_buffer_detach(event, rb);
+                       ring_buffer_put(rb); /* can't be last, we still have one */
+               }
                mutex_unlock(&event->mmap_mutex);
+               put_event(event);
 
-               ring_buffer_put(rb);
-               free_uid(user);
+               /*
+                * Restart the iteration; either we're on the wrong list or
+                * we've destroyed its integrity by doing a deletion.
+                */
+               goto again;
        }
+       rcu_read_unlock();
+
+       /*
+        * It could be there's still a few 0-ref events on the list; they'll
+        * get cleaned up by free_event() -- they'll also still have their
+        * ref on the rb and will free it whenever they are done with it.
+        *
+        * Aside from that, this buffer is 'fully' detached and unmapped,
+        * undo the VM accounting.
+        */
+
+       atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+       vma->vm_mm->pinned_vm -= mmap_locked;
+       free_uid(mmap_user);
+
+       ring_buffer_put(rb); /* could be last */
 }
 
 static const struct vm_operations_struct perf_mmap_vmops = {
@@ -3674,12 +3729,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        WARN_ON_ONCE(event->ctx->parent_ctx);
+again:
        mutex_lock(&event->mmap_mutex);
        if (event->rb) {
-               if (event->rb->nr_pages == nr_pages)
-                       atomic_inc(&event->rb->refcount);
-               else
+               if (event->rb->nr_pages != nr_pages) {
                        ret = -EINVAL;
+                       goto unlock;
+               }
+
+               if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
+                       /*
+                        * Raced against perf_mmap_close() through
+                        * perf_event_set_output(). Try again, hope for better
+                        * luck.
+                        */
+                       mutex_unlock(&event->mmap_mutex);
+                       goto again;
+               }
+
                goto unlock;
        }
 
@@ -3720,12 +3787,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                ret = -ENOMEM;
                goto unlock;
        }
-       rcu_assign_pointer(event->rb, rb);
+
+       atomic_set(&rb->mmap_count, 1);
+       rb->mmap_locked = extra;
+       rb->mmap_user = get_current_user();
 
        atomic_long_add(user_extra, &user->locked_vm);
-       event->mmap_locked = extra;
-       event->mmap_user = get_current_user();
-       vma->vm_mm->pinned_vm += event->mmap_locked;
+       vma->vm_mm->pinned_vm += extra;
+
+       ring_buffer_attach(event, rb);
+       rcu_assign_pointer(event->rb, rb);
 
        perf_event_update_userpage(event);
 
@@ -3734,7 +3805,11 @@ unlock:
                atomic_inc(&event->mmap_count);
        mutex_unlock(&event->mmap_mutex);
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       /*
+        * Since pinned accounting is per vm we cannot allow fork() to copy our
+        * vma.
+        */
+       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &perf_mmap_vmops;
 
        return ret;
@@ -6412,6 +6487,8 @@ set:
        if (atomic_read(&event->mmap_count))
                goto unlock;
 
+       old_rb = event->rb;
+
        if (output_event) {
                /* get the rb we want to redirect to */
                rb = ring_buffer_get(output_event);
@@ -6419,16 +6496,28 @@ set:
                        goto unlock;
        }
 
-       old_rb = event->rb;
-       rcu_assign_pointer(event->rb, rb);
        if (old_rb)
                ring_buffer_detach(event, old_rb);
+
+       if (rb)
+               ring_buffer_attach(event, rb);
+
+       rcu_assign_pointer(event->rb, rb);
+
+       if (old_rb) {
+               ring_buffer_put(old_rb);
+               /*
+                * Since we detached before setting the new rb (so that we
+                * could attach the new one), we could have missed a wakeup.
+                * Provide it now.
+                */
+               wake_up_all(&event->waitq);
+       }
+
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
 
-       if (old_rb)
-               ring_buffer_put(old_rb);
 out:
        return ret;
 }
index a64f8aeb5c1f5adae53fde406b3d904e1485aa80..20185ea64aa6f952022d0e8a1f64ffea044526da 100644 (file)
@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
        list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
                if (iter->hw.bp_target == tsk &&
                    find_slot_idx(iter) == type &&
-                   cpu == iter->cpu)
+                   (iter->cpu < 0 || cpu == iter->cpu))
                        count += hw_breakpoint_weight(iter);
        }
 
@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                return;
        }
 
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                unsigned int nr;
 
                nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
        if (cpu >= 0) {
                toggle_bp_task_slot(bp, cpu, enable, type, weight);
        } else {
-               for_each_online_cpu(cpu)
+               for_each_possible_cpu(cpu)
                        toggle_bp_task_slot(bp, cpu, enable, type, weight);
        }
 
index eb675c4d59dfd440b9f93d92226ee2dc5ffe8290..ca6599723be5624cdeb4a3cad5ef5dd10a4e1c7b 100644 (file)
@@ -31,6 +31,10 @@ struct ring_buffer {
        spinlock_t                      event_lock;
        struct list_head                event_list;
 
+       atomic_t                        mmap_count;
+       unsigned long                   mmap_locked;
+       struct user_struct              *mmap_user;
+
        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
 };
index 3fed7f0cbcdfe3d2149dd903e6912628ba58da95..bddf3b201a480015e2b1b15c217c43ea6796fcd0 100644 (file)
@@ -467,6 +467,7 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 /* Optimization staging list, protected by kprobe_mutex */
 static LIST_HEAD(optimizing_list);
 static LIST_HEAD(unoptimizing_list);
+static LIST_HEAD(freeing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
@@ -504,7 +505,7 @@ static __kprobes void do_optimize_kprobes(void)
  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  * if need) kprobes listed on unoptimizing_list.
  */
-static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
+static __kprobes void do_unoptimize_kprobes(void)
 {
        struct optimized_kprobe *op, *tmp;
 
@@ -515,9 +516,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
        /* Ditto to do_optimize_kprobes */
        get_online_cpus();
        mutex_lock(&text_mutex);
-       arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+       arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop free_list for disarming */
-       list_for_each_entry_safe(op, tmp, free_list, list) {
+       list_for_each_entry_safe(op, tmp, &freeing_list, list) {
                /* Disarm probes if marked disabled */
                if (kprobe_disabled(&op->kp))
                        arch_disarm_kprobe(&op->kp);
@@ -536,11 +537,11 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 }
 
 /* Reclaim all kprobes on the free_list */
-static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
+static __kprobes void do_free_cleaned_kprobes(void)
 {
        struct optimized_kprobe *op, *tmp;
 
-       list_for_each_entry_safe(op, tmp, free_list, list) {
+       list_for_each_entry_safe(op, tmp, &freeing_list, list) {
                BUG_ON(!kprobe_unused(&op->kp));
                list_del_init(&op->list);
                free_aggr_kprobe(&op->kp);
@@ -556,8 +557,6 @@ static __kprobes void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
-       LIST_HEAD(free_list);
-
        mutex_lock(&kprobe_mutex);
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);
@@ -566,7 +565,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
         * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
         * kprobes before waiting for quiescence period.
         */
-       do_unoptimize_kprobes(&free_list);
+       do_unoptimize_kprobes();
 
        /*
         * Step 2: Wait for quiescence period to ensure all running interrupts
@@ -581,7 +580,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
        do_optimize_kprobes();
 
        /* Step 4: Free cleaned kprobes after quiescence period */
-       do_free_cleaned_kprobes(&free_list);
+       do_free_cleaned_kprobes();
 
        mutex_unlock(&module_mutex);
        mutex_unlock(&kprobe_mutex);
@@ -723,8 +722,19 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
        if (!list_empty(&op->list))
                /* Dequeue from the (un)optimization queue */
                list_del_init(&op->list);
-
        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+
+       if (kprobe_unused(p)) {
+               /* Enqueue if it is unused */
+               list_add(&op->list, &freeing_list);
+               /*
+                * Remove unused probes from the hash list. After waiting
+                * for synchronization, this probe is reclaimed.
+                * (reclaiming is done by do_free_cleaned_kprobes().)
+                */
+               hlist_del_rcu(&op->kp.hlist);
+       }
+
        /* Don't touch the code, because it is already freed. */
        arch_remove_optimized_kprobe(op);
 }
index aed981a3f69c180dda76bfca76411656842eec01..335a7ae697f5986269d94a445fd27142fc75c09f 100644 (file)
@@ -665,20 +665,22 @@ static int ptrace_peek_siginfo(struct task_struct *child,
                if (unlikely(is_compat_task())) {
                        compat_siginfo_t __user *uinfo = compat_ptr(data);
 
-                       ret = copy_siginfo_to_user32(uinfo, &info);
-                       ret |= __put_user(info.si_code, &uinfo->si_code);
+                       if (copy_siginfo_to_user32(uinfo, &info) ||
+                           __put_user(info.si_code, &uinfo->si_code)) {
+                               ret = -EFAULT;
+                               break;
+                       }
+
                } else
 #endif
                {
                        siginfo_t __user *uinfo = (siginfo_t __user *) data;
 
-                       ret = copy_siginfo_to_user(uinfo, &info);
-                       ret |= __put_user(info.si_code, &uinfo->si_code);
-               }
-
-               if (ret) {
-                       ret = -EFAULT;
-                       break;
+                       if (copy_siginfo_to_user(uinfo, &info) ||
+                           __put_user(info.si_code, &uinfo->si_code)) {
+                               ret = -EFAULT;
+                               break;
+                       }
                }
 
                data += sizeof(siginfo_t);
index eb911dbce2679d29fea7226084be73d34a7336dd..322ea8e93e4ba36c11c0348a34c12e58c983da03 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sort.h>
-
+#include <linux/string.h>
 #include <linux/range.h>
 
 int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
@@ -32,9 +32,8 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
        if (start >= end)
                return nr_range;
 
-       /* Try to merge it with old one: */
+       /* get new start/end: */
        for (i = 0; i < nr_range; i++) {
-               u64 final_start, final_end;
                u64 common_start, common_end;
 
                if (!range[i].end)
@@ -45,14 +44,16 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
                if (common_start > common_end)
                        continue;
 
-               final_start = min(range[i].start, start);
-               final_end = max(range[i].end, end);
+               /* new start/end, will add it back at last */
+               start = min(range[i].start, start);
+               end = max(range[i].end, end);
 
-               /* clear it and add it back for further merge */
-               range[i].start = 0;
-               range[i].end =  0;
-               return add_range_with_merge(range, az, nr_range,
-                       final_start, final_end);
+               memmove(&range[i], &range[i + 1],
+                       (nr_range - (i + 1)) * sizeof(range[i]));
+               range[nr_range - 1].start = 0;
+               range[nr_range - 1].end   = 0;
+               nr_range--;
+               i--;
        }
 
        /* Need to add it: */
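The new add_range_with_merge() above absorbs every overlapping entry into the incoming range, compacts the array with memmove(), and then appends the merged range once, instead of clearing a slot and recursing. A stand-alone sketch of that idea with a simplified overlap test (touching ranges are not merged here):

#include <stdio.h>
#include <string.h>

struct range { unsigned long long start, end; };

static int add_range_merged(struct range *r, int nr, unsigned long long start,
			    unsigned long long end)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (r[i].end <= start || end <= r[i].start)
			continue;		/* no overlap */
		start = r[i].start < start ? r[i].start : start;
		end   = r[i].end   > end   ? r[i].end   : end;
		memmove(&r[i], &r[i + 1], (nr - (i + 1)) * sizeof(r[i]));
		nr--;
		i--;				/* re-examine the slot we moved into */
	}
	r[nr].start = start;			/* append the merged range once */
	r[nr].end = end;
	return nr + 1;
}

int main(void)
{
	struct range r[8] = { { 0, 10 }, { 20, 30 } };
	int nr = add_range_merged(r, 2, 5, 25);	/* bridges both entries */

	for (int i = 0; i < nr; i++)
		printf("[%llu, %llu)\n", r[i].start, r[i].end); /* [0, 30) */
	return 0;
}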
index 58453b8272fdbab9422bf6c040683397c269ba82..e8b335016c526594cd910a030c962099907d8518 100644 (file)
@@ -633,7 +633,19 @@ void wake_up_nohz_cpu(int cpu)
 static inline bool got_nohz_idle_kick(void)
 {
        int cpu = smp_processor_id();
-       return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+
+       if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+               return false;
+
+       if (idle_cpu(cpu) && !need_resched())
+               return true;
+
+       /*
+        * We can't run Idle Load Balance on this CPU for this time so we
+        * We can't run Idle Load Balance on this CPU at this time, so we
+        * cancel it and clear NOHZ_BALANCE_KICK.
+       clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+       return false;
 }
 
 #else /* CONFIG_NO_HZ_COMMON */
@@ -1393,8 +1405,9 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-       if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
-           && !tick_nohz_full_cpu(smp_processor_id()))
+       if (llist_empty(&this_rq()->wake_list)
+                       && !tick_nohz_full_cpu(smp_processor_id())
+                       && !got_nohz_idle_kick())
                return;
 
        /*
@@ -1417,7 +1430,7 @@ void scheduler_ipi(void)
        /*
         * Check if someone kicked us for doing the nohz idle load balance.
         */
-       if (unlikely(got_nohz_idle_kick() && !need_resched())) {
+       if (unlikely(got_nohz_idle_kick())) {
                this_rq()->idle_balance = 1;
                raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
@@ -4745,7 +4758,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
-       vtime_init_idle(idle);
+       vtime_init_idle(idle, cpu);
 #if defined(CONFIG_SMP)
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
index cc2dc3eea8a3a55d7c86cc9896bfe9d235c18cab..b5ccba22603b51a92c2aa1f0ef647a878dd4e2c3 100644 (file)
@@ -747,17 +747,17 @@ void arch_vtime_task_switch(struct task_struct *prev)
 
        write_seqlock(&current->vtime_seqlock);
        current->vtime_snap_whence = VTIME_SYS;
-       current->vtime_snap = sched_clock();
+       current->vtime_snap = sched_clock_cpu(smp_processor_id());
        write_sequnlock(&current->vtime_seqlock);
 }
 
-void vtime_init_idle(struct task_struct *t)
+void vtime_init_idle(struct task_struct *t, int cpu)
 {
        unsigned long flags;
 
        write_seqlock_irqsave(&t->vtime_seqlock, flags);
        t->vtime_snap_whence = VTIME_SYS;
-       t->vtime_snap = sched_clock();
+       t->vtime_snap = sched_clock_cpu(cpu);
        write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
 }
 
index 0c739423b0f9c5728de497e5391730e417536e61..20d6fba70652094324c13ef6f8e2fcf9bba26a50 100644 (file)
@@ -599,8 +599,6 @@ void tick_broadcast_oneshot_control(unsigned long reason)
        } else {
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
-                       if (dev->next_event.tv64 == KTIME_MAX)
-                               goto out;
                        /*
                         * The cpu which was handling the broadcast
                         * timer marked this cpu in the broadcast
@@ -614,6 +612,11 @@ void tick_broadcast_oneshot_control(unsigned long reason)
                                       tick_broadcast_pending_mask))
                                goto out;
 
+                       /*
+                        * Bail out if there is no next event.
+                        */
+                       if (dev->next_event.tv64 == KTIME_MAX)
+                               goto out;
                        /*
                         * If the pending bit is not set, then we are
                         * either the CPU handling the broadcast
@@ -698,10 +701,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 
                bc->event_handler = tick_handle_oneshot_broadcast;
 
-               /* Take the do_timer update */
-               if (!tick_nohz_full_cpu(cpu))
-                       tick_do_timer_cpu = cpu;
-
                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
index f4208138fbf4cd1a72791f99c31adcbea9180e3e..0cf1c14531817eacb61eae48f6f76a45a0cc8027 100644 (file)
@@ -306,7 +306,7 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
                 * we can't safely shutdown that CPU.
                 */
                if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
-                       return -EINVAL;
+                       return NOTIFY_BAD;
                break;
        }
        return NOTIFY_OK;
index 6698e0c04ead068279a7ed6a106a794b4ee1a8e6..ce0daa320a261f1aebfc5e16d7d7b3e7caf71c37 100644 (file)
@@ -287,3 +287,91 @@ wait_queue_head_t *bit_waitqueue(void *word, int bit)
        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
 }
 EXPORT_SYMBOL(bit_waitqueue);
+
+/*
+ * Manipulate the atomic_t address to produce a better bit waitqueue table hash
+ * index (we're keying off bit -1, but that would produce a horrible hash
+ * value).
+ */
+static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
+{
+       if (BITS_PER_LONG == 64) {
+               unsigned long q = (unsigned long)p;
+               return bit_waitqueue((void *)(q & ~1), q & 1);
+       }
+       return bit_waitqueue(p, 0);
+}
+
+static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
+                                 void *arg)
+{
+       struct wait_bit_key *key = arg;
+       struct wait_bit_queue *wait_bit
+               = container_of(wait, struct wait_bit_queue, wait);
+       atomic_t *val = key->flags;
+
+       if (wait_bit->key.flags != key->flags ||
+           wait_bit->key.bit_nr != key->bit_nr ||
+           atomic_read(val) != 0)
+               return 0;
+       return autoremove_wake_function(wait, mode, sync, key);
+}
+
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
+ * the action routine passed to __wait_on_atomic_t() may return an error code.
+ * A nonzero return code halts the wait and is returned to the caller.
+ */
+static __sched
+int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
+                      int (*action)(atomic_t *), unsigned mode)
+{
+       atomic_t *val;
+       int ret = 0;
+
+       do {
+               prepare_to_wait(wq, &q->wait, mode);
+               val = q->key.flags;
+               if (atomic_read(val) == 0)
+                       ret = (*action)(val);
+       } while (!ret && atomic_read(val) != 0);
+       finish_wait(wq, &q->wait);
+       return ret;
+}
+
+#define DEFINE_WAIT_ATOMIC_T(name, p)                                  \
+       struct wait_bit_queue name = {                                  \
+               .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),              \
+               .wait   = {                                             \
+                       .private        = current,                      \
+                       .func           = wake_atomic_t_function,       \
+                       .task_list      =                               \
+                               LIST_HEAD_INIT((name).wait.task_list),  \
+               },                                                      \
+       }
+
+__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
+                                        unsigned mode)
+{
+       wait_queue_head_t *wq = atomic_t_waitqueue(p);
+       DEFINE_WAIT_ATOMIC_T(wait, p);
+
+       return __wait_on_atomic_t(wq, &wait, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
+
+/**
+ * wake_up_atomic_t - Wake up a waiter on an atomic_t
+ * @p: The atomic_t being waited on, a kernel virtual address
+ *
+ * Wake up anyone waiting for the atomic_t to go to zero.
+ *
+ * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
+ * check is done by the waiter's wake function, not by the waker itself).
+ */
+void wake_up_atomic_t(atomic_t *p)
+{
+       __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
+}
+EXPORT_SYMBOL(wake_up_atomic_t);
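wait_on_atomic_t() and wake_up_atomic_t() above give callers a way to sleep until an atomic_t reaches zero, with the caller-supplied action deciding how to wait and whether to bail out (a nonzero return aborts the wait). A user-space analogue of that calling convention, not the kernel waitqueue machinery:

#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

static int wait_on_counter(atomic_int *v, int (*action)(atomic_int *))
{
	int ret = 0;

	while (!ret && atomic_load(v) != 0)
		ret = action(v);	/* nonzero aborts, like -EINTR would */
	return ret;
}

static int yield_action(atomic_int *v)
{
	(void)v;
	sched_yield();			/* the kernel action would sleep here */
	return 0;			/* 0 means "keep waiting" */
}

int main(void)
{
	atomic_int users = 0;		/* already zero: the wait returns at once */

	printf("wait returned %d\n", wait_on_counter(&users, yield_action));
	return 0;
}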
index daed28dd583031b4759f68b04cfb7e99c0f60d48..829a77c628348a78b9efc130689d549368a57567 100644 (file)
@@ -48,7 +48,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
                if (!trylock_page(page))
                        BUG();
                page->mapping = mapping;
-               do_invalidatepage(page, 0);
+               do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
                page->mapping = NULL;
                unlock_page(page);
        }
index ff3218a0f5e14029aab5c23dd323f447d2a86173..2d414508e9ecb32e64df911c72bf0dd2682b87b8 100644 (file)
@@ -373,8 +373,10 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 {
        int index;
 
-       if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
+       if (size > KMALLOC_MAX_SIZE) {
+               WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
+       }
 
        if (size <= 192) {
                if (!size)
index c75b736e54b793f338fce25f35afa60eb0faf5ef..e2e8a8a7eb9d8facfd7581998641e72f81b4c02d 100644 (file)
@@ -26,7 +26,8 @@
 /**
  * do_invalidatepage - invalidate part or all of a page
  * @page: the page which is affected
- * @offset: the index of the truncation point
+ * @offset: start of the range to invalidate
+ * @length: length of the range to invalidate
  *
  * do_invalidatepage() is called when all or part of the page has become
  * invalidated by a truncate operation.
  * point.  Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-void do_invalidatepage(struct page *page, unsigned long offset)
+void do_invalidatepage(struct page *page, unsigned int offset,
+                      unsigned int length)
 {
-       void (*invalidatepage)(struct page *, unsigned long);
+       void (*invalidatepage)(struct page *, unsigned int, unsigned int);
+
        invalidatepage = page->mapping->a_ops->invalidatepage;
 #ifdef CONFIG_BLOCK
        if (!invalidatepage)
                invalidatepage = block_invalidatepage;
 #endif
        if (invalidatepage)
-               (*invalidatepage)(page, offset);
-}
-
-static inline void truncate_partial_page(struct page *page, unsigned partial)
-{
-       zero_user_segment(page, partial, PAGE_CACHE_SIZE);
-       cleancache_invalidate_page(page->mapping, page);
-       if (page_has_private(page))
-               do_invalidatepage(page, partial);
+               (*invalidatepage)(page, offset, length);
 }
 
 /*
@@ -103,7 +98,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
                return -EIO;
 
        if (page_has_private(page))
-               do_invalidatepage(page, 0);
+               do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 
        cancel_dirty_page(page, PAGE_CACHE_SIZE);
 
@@ -185,11 +180,11 @@ int invalidate_inode_page(struct page *page)
  * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate
- * @lend: offset to which to truncate
+ * @lend: offset to which to truncate (inclusive)
  *
  * Truncate the page cache, removing the pages that are between
- * specified offsets (and zeroing out partial page
- * (if lstart is not page aligned)).
+ * specified offsets (and zeroing out partial pages
+ * if lstart or lend + 1 is not page aligned).
  *
  * Truncate takes two passes - the first pass is nonblocking.  It will not
  * block on page locks and it will not block on writeback.  The second pass
@@ -200,35 +195,58 @@ int invalidate_inode_page(struct page *page)
  * We pass down the cache-hot hint to the page freeing code.  Even if the
  * mapping is large, it is probably the case that the final pages are the most
  * recently touched, and freeing happens in ascending file offset order.
+ *
+ * Note that since ->invalidatepage() accepts a range to invalidate,
+ * truncate_inode_pages_range() is able to handle cases where lend + 1 is
+ * not page aligned.
  */
 void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
 {
-       const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
-       const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
-       struct pagevec pvec;
-       pgoff_t index;
-       pgoff_t end;
-       int i;
+       pgoff_t         start;          /* inclusive */
+       pgoff_t         end;            /* exclusive */
+       unsigned int    partial_start;  /* inclusive */
+       unsigned int    partial_end;    /* exclusive */
+       struct pagevec  pvec;
+       pgoff_t         index;
+       int             i;
 
        cleancache_invalidate_inode(mapping);
        if (mapping->nrpages == 0)
                return;
 
-       BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
-       end = (lend >> PAGE_CACHE_SHIFT);
+       /* Offsets within partial pages */
+       partial_start = lstart & (PAGE_CACHE_SIZE - 1);
+       partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+
+       /*
+        * 'start' and 'end' always covers the range of pages to be fully
+        * truncated. Partial pages are covered with 'partial_start' at the
+        * start of the range and 'partial_end' at the end of the range.
+        * Note that 'end' is exclusive while 'lend' is inclusive.
+        */
+       start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       if (lend == -1)
+               /*
+                * lend == -1 indicates end-of-file so we have to set 'end'
+                * to the highest possible pgoff_t and since the type is
+                * unsigned we're using -1.
+                */
+               end = -1;
+       else
+               end = (lend + 1) >> PAGE_CACHE_SHIFT;
 
        pagevec_init(&pvec, 0);
        index = start;
-       while (index <= end && pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+       while (index < end && pagevec_lookup(&pvec, mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
                        /* We rely upon deletion not changing page->index */
                        index = page->index;
-                       if (index > end)
+                       if (index >= end)
                                break;
 
                        if (!trylock_page(page))
@@ -247,27 +265,56 @@ void truncate_inode_pages_range(struct address_space *mapping,
                index++;
        }
 
-       if (partial) {
+       if (partial_start) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
+                       unsigned int top = PAGE_CACHE_SIZE;
+                       if (start > end) {
+                               /* Truncation within a single page */
+                               top = partial_end;
+                               partial_end = 0;
+                       }
                        wait_on_page_writeback(page);
-                       truncate_partial_page(page, partial);
+                       zero_user_segment(page, partial_start, top);
+                       cleancache_invalidate_page(mapping, page);
+                       if (page_has_private(page))
+                               do_invalidatepage(page, partial_start,
+                                                 top - partial_start);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }
+       if (partial_end) {
+               struct page *page = find_lock_page(mapping, end);
+               if (page) {
+                       wait_on_page_writeback(page);
+                       zero_user_segment(page, 0, partial_end);
+                       cleancache_invalidate_page(mapping, page);
+                       if (page_has_private(page))
+                               do_invalidatepage(page, 0,
+                                                 partial_end);
+                       unlock_page(page);
+                       page_cache_release(page);
+               }
+       }
+       /*
+        * If the truncation happened within a single page no pages
+        * will be released, just zeroed, so we can bail out now.
+        */
+       if (start >= end)
+               return;
 
        index = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
                        if (index == start)
                                break;
                        index = start;
                        continue;
                }
-               if (index == start && pvec.pages[0]->index > end) {
+               if (index == start && pvec.pages[0]->index >= end) {
                        pagevec_release(&pvec);
                        break;
                }
@@ -277,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
                        /* We rely upon deletion not changing page->index */
                        index = page->index;
-                       if (index > end)
+                       if (index >= end)
                                break;
 
                        lock_page(page);
@@ -598,10 +645,8 @@ void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
         * This rounding is currently just for example: unmap_mapping_range
         * expands its hole outwards, whereas we want it to contract the hole
         * inwards.  However, existing callers of truncate_pagecache_range are
-        * doing their own page rounding first; and truncate_inode_pages_range
-        * currently BUGs if lend is not pagealigned-1 (it handles partial
-        * page at start of hole, but not partial page at end of hole).  Note
-        * unmap_mapping_range allows holelen 0 for all, and we allow lend -1.
+        * doing their own page rounding first.  Note that unmap_mapping_range
+        * allows holelen 0 for all, and we allow lend -1 for end of file.
         */
 
        /*
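The truncate_inode_pages_range() rework above splits the byte range [lstart, lend] into whole pages plus up to two partially covered pages. A worked example of the new bookkeeping, assuming a 4096-byte page (stand-alone arithmetic only):

#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	unsigned long long lstart = 1000, lend = 10000;	/* lend is inclusive */

	unsigned long long partial_start = lstart & (PAGE_SIZE - 1);     /* 1000 */
	unsigned long long partial_end = (lend + 1) & (PAGE_SIZE - 1);   /* 1809 */
	unsigned long long start = (lstart + PAGE_SIZE - 1) / PAGE_SIZE; /* 1    */
	unsigned long long end = (lend + 1) / PAGE_SIZE;                 /* 2    */

	/* Pages [start, end) = page 1 are dropped whole; page 0 is zeroed
	 * from offset partial_start to its end, page 2 from 0 to partial_end. */
	printf("full pages [%llu, %llu), partial_start %llu, partial_end %llu\n",
	       start, end, partial_start, partial_end);
	return 0;
}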
index d817c932d634e6609081dbf8f492cf04463b867d..ace5e55fe5a32ed01fd8ca3c0c63c802632ac547 100644 (file)
@@ -341,7 +341,6 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
 
 static void bredr_setup(struct hci_request *req)
 {
-       struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;
 
@@ -365,10 +364,6 @@ static void bredr_setup(struct hci_request *req)
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
 
-       bacpy(&cp.bdaddr, BDADDR_ANY);
-       cp.delete_all = 0x01;
-       hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
-
        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
@@ -602,6 +597,16 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
        struct hci_dev *hdev = req->hdev;
        u8 p;
 
+       /* Only send HCI_Delete_Stored_Link_Key if it is supported */
+       if (hdev->commands[6] & 0x80) {
+               struct hci_cp_delete_stored_link_key cp;
+
+               bacpy(&cp.bdaddr, BDADDR_ANY);
+               cp.delete_all = 0x01;
+               hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
+                           sizeof(cp), &cp);
+       }
+
        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);
 
index 24bee07ee4ce1a54a86f5d5bd535b13bd1e42c02..68843a28a7af6a3e52f92f0cde033e03b031560a 100644 (file)
@@ -2852,6 +2852,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
        BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
               conn, code, ident, dlen);
 
+       if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
+               return NULL;
+
        len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
        count = min_t(unsigned int, conn->mtu, len);
 
@@ -4330,7 +4333,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
        struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
        u16 type, result;
 
-       if (cmd_len != sizeof(*rsp))
+       if (cmd_len < sizeof(*rsp))
                return -EPROTO;
 
        type   = __le16_to_cpu(rsp->type);
index 81f2389f78eb884e80cafbd624d3029aa48e3ed1..d6448e35e02712682953d3d617aeac3b5f40737e 100644 (file)
@@ -465,8 +465,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        skb_set_transport_header(skb, skb->len);
        mldq = (struct mld_msg *) icmp6_hdr(skb);
 
-       interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
-                                         br->multicast_query_response_interval;
+       interval = ipv6_addr_any(group) ?
+                       br->multicast_query_response_interval :
+                       br->multicast_last_member_interval;
 
        mldq->mld_type = ICMPV6_MGM_QUERY;
        mldq->mld_code = 0;
index fc1e289397f5895f3d2191ee78c9b450cc33e5cf..faebb398fb46ad93a1d09b051f8662bcf203eaab 100644 (file)
@@ -791,6 +791,40 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
 }
 EXPORT_SYMBOL(dev_get_by_index);
 
+/**
+ *     netdev_get_name - get a netdevice name, knowing its ifindex.
+ *     @net: network namespace
+ *     @name: a pointer to the buffer where the name will be stored.
+ *     @ifindex: the ifindex of the interface to get the name from.
+ *
+ *     The use of raw_seqcount_begin() and cond_resched() before
+ *     retrying is required as we want to give the writers a chance
+ *     to complete when CONFIG_PREEMPT is not set.
+ */
+int netdev_get_name(struct net *net, char *name, int ifindex)
+{
+       struct net_device *dev;
+       unsigned int seq;
+
+retry:
+       seq = raw_seqcount_begin(&devnet_rename_seq);
+       rcu_read_lock();
+       dev = dev_get_by_index_rcu(net, ifindex);
+       if (!dev) {
+               rcu_read_unlock();
+               return -ENODEV;
+       }
+
+       strcpy(name, dev->name);
+       rcu_read_unlock();
+       if (read_seqcount_retry(&devnet_rename_seq, seq)) {
+               cond_resched();
+               goto retry;
+       }
+
+       return 0;
+}
+
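netdev_get_name() above copies dev->name under a sequence count and retries when a concurrent rename raced with the copy, calling cond_resched() so a preempted writer can finish. A minimal single-threaded sketch of that read-retry shape, with a toy counter rather than the kernel seqcount API:

#include <stdio.h>
#include <sched.h>

static unsigned int rename_seq;		/* odd while a rename is in progress */
static char dev_name[16] = "eth0";

static void read_name(char *out, size_t len)
{
	unsigned int seq;

	for (;;) {
		seq = rename_seq;	/* raw_seqcount_begin() analogue */
		snprintf(out, len, "%s", dev_name);
		if (!(seq & 1) && seq == rename_seq)
			return;		/* no writer ran: the copy is stable */
		sched_yield();		/* cond_resched() before retrying */
	}
}

int main(void)
{
	char name[16];

	read_name(name, sizeof(name));
	printf("%s\n", name);
	return 0;
}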
 /**
  *     dev_getbyhwaddr_rcu - find a device by its hardware address
  *     @net: the applicable net namespace
index 6cc0481faade0726cc2ae48e3161324e142f5b9f..5b7d0e1d0664b1b00d5f30a30248f88fbacde2fd 100644 (file)
@@ -19,9 +19,8 @@
 
 static int dev_ifname(struct net *net, struct ifreq __user *arg)
 {
-       struct net_device *dev;
        struct ifreq ifr;
-       unsigned seq;
+       int error;
 
        /*
         *      Fetch the caller's info block.
@@ -30,19 +29,9 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
                return -EFAULT;
 
-retry:
-       seq = read_seqcount_begin(&devnet_rename_seq);
-       rcu_read_lock();
-       dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
-       if (!dev) {
-               rcu_read_unlock();
-               return -ENODEV;
-       }
-
-       strcpy(ifr.ifr_name, dev->name);
-       rcu_read_unlock();
-       if (read_seqcount_retry(&devnet_rename_seq, seq))
-               goto retry;
+       error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
+       if (error)
+               return error;
 
        if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
                return -EFAULT;
index 22efdaa76ebf9909db7a69d59d54de191fd00d69..ce91766eeca90efc1a64b39997b0990b339b16cb 100644 (file)
@@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_IPV6_CSUM_BIT] =        "tx-checksum-ipv6",
        [NETIF_F_HIGHDMA_BIT] =          "highdma",
        [NETIF_F_FRAGLIST_BIT] =         "tx-scatter-gather-fraglist",
-       [NETIF_F_HW_VLAN_CTAG_TX_BIT] =  "tx-vlan-ctag-hw-insert",
+       [NETIF_F_HW_VLAN_CTAG_TX_BIT] =  "tx-vlan-hw-insert",
 
-       [NETIF_F_HW_VLAN_CTAG_RX_BIT] =  "rx-vlan-ctag-hw-parse",
-       [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter",
+       [NETIF_F_HW_VLAN_CTAG_RX_BIT] =  "rx-vlan-hw-parse",
+       [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter",
        [NETIF_F_HW_VLAN_STAG_TX_BIT] =  "tx-vlan-stag-hw-insert",
        [NETIF_F_HW_VLAN_STAG_RX_BIT] =  "rx-vlan-stag-hw-parse",
        [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
index cfd777bd6bd0cea8023cfe32e431488727982225..1c1738cc4538c142bcf8d3a7ec8a534204a68d91 100644 (file)
@@ -483,15 +483,8 @@ EXPORT_SYMBOL(skb_add_rx_frag);
 
 static void skb_drop_list(struct sk_buff **listp)
 {
-       struct sk_buff *list = *listp;
-
+       kfree_skb_list(*listp);
        *listp = NULL;
-
-       do {
-               struct sk_buff *this = list;
-               list = list->next;
-               kfree_skb(this);
-       } while (list);
 }
 
 static inline void skb_drop_fraglist(struct sk_buff *skb)
@@ -651,6 +644,17 @@ void kfree_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(kfree_skb);
 
+void kfree_skb_list(struct sk_buff *segs)
+{
+       while (segs) {
+               struct sk_buff *next = segs->next;
+
+               kfree_skb(segs);
+               segs = next;
+       }
+}
+EXPORT_SYMBOL(kfree_skb_list);
+
 /**
  *     skb_tx_error - report an sk_buff xmit error
  *     @skb: buffer that triggered an error
index 88868a9d21da54761a09b2ecaf0b88766190152f..d6d024cfaaafd0575723d41004dd8d18816b55b0 100644 (file)
@@ -571,9 +571,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
        int ret = -ENOPROTOOPT;
 #ifdef CONFIG_NETDEVICES
        struct net *net = sock_net(sk);
-       struct net_device *dev;
        char devname[IFNAMSIZ];
-       unsigned seq;
 
        if (sk->sk_bound_dev_if == 0) {
                len = 0;
@@ -584,20 +582,9 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
        if (len < IFNAMSIZ)
                goto out;
 
-retry:
-       seq = read_seqcount_begin(&devnet_rename_seq);
-       rcu_read_lock();
-       dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
-       ret = -ENODEV;
-       if (!dev) {
-               rcu_read_unlock();
+       ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
+       if (ret)
                goto out;
-       }
-
-       strcpy(devname, dev->name);
-       rcu_read_unlock();
-       if (read_seqcount_retry(&devnet_rename_seq, seq))
-               goto retry;
 
        len = strlen(devname) + 1;
 
index b2e805af9b87a03675d7bac1a7e210124757e566..7856d1651d054325a67d1ac4847204be45bbceaa 100644 (file)
@@ -178,7 +178,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 
                                err = __skb_linearize(skb);
                                if (err) {
-                                       kfree_skb(segs);
+                                       kfree_skb_list(segs);
                                        segs = ERR_PTR(err);
                                        goto out;
                                }
index ff4b781b1056e59937d44e9d751b6acf299d7a60..32b0e978c8e07efbb875fafbc15d7715003ec1d8 100644 (file)
@@ -125,15 +125,16 @@ static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
 /* timer function to flush queue in flushtimeout time */
 static void ulog_timer(unsigned long data)
 {
+       unsigned int groupnum = *((unsigned int *)data);
        struct ulog_net *ulog = container_of((void *)data,
                                             struct ulog_net,
-                                            nlgroup[*(unsigned int *)data]);
+                                            nlgroup[groupnum]);
        pr_debug("timer function called, calling ulog_send\n");
 
        /* lock to protect against somebody modifying our structure
         * from ipt_ulog_target at the same time */
        spin_lock_bh(&ulog->lock);
-       ulog_send(ulog, data);
+       ulog_send(ulog, groupnum);
        spin_unlock_bh(&ulog->lock);
 }
 
@@ -407,8 +408,11 @@ static int __net_init ulog_tg_net_init(struct net *net)
 
        spin_lock_init(&ulog->lock);
        /* initialize ulog_buffers */
-       for (i = 0; i < ULOG_MAXNLGROUPS; i++)
-               setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, i);
+       for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
+               ulog->nlgroup[i] = i;
+               setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
+                           (unsigned long)&ulog->nlgroup[i]);
+       }
 
        ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
        if (!ulog->nflognl)
index 719652305a2950a3407804136bf7f42dc1d18e15..7999fc55c83ba74abeffbd1a9d53bec25985e163 100644 (file)
@@ -1003,7 +1003,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_info *md5sig;
 
-       key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
+       key = tcp_md5_do_lookup(sk, addr, family);
        if (key) {
                /* Pre-existing entry - just update that one. */
                memcpy(key->key, newkey, newkeylen);
@@ -1048,7 +1048,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
        struct tcp_md5sig_key *key;
        struct tcp_md5sig_info *md5sig;
 
-       key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
+       key = tcp_md5_do_lookup(sk, addr, family);
        if (!key)
                return -ENOENT;
        hlist_del_rcu(&key->node);
index 1bbf744c2cc35e7953b9580db08a357f4e829a04..4ab4c38958c6857afd7cfe998fc8866a0da00382 100644 (file)
@@ -2655,6 +2655,9 @@ static void init_loopback(struct net_device *dev)
                        if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
                                continue;
 
+                       if (sp_ifa->rt)
+                               continue;
+
                        sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
 
                        /* Failure cases are ignored */
@@ -4303,6 +4306,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
        struct inet6_ifaddr *ifp;
        struct net_device *dev = idev->dev;
        bool update_rs = false;
+       struct in6_addr ll_addr;
 
        if (token == NULL)
                return -EINVAL;
@@ -4322,11 +4326,9 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
 
        write_unlock_bh(&idev->lock);
 
-       if (!idev->dead && (idev->if_flags & IF_READY)) {
-               struct in6_addr ll_addr;
-
-               ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
-                               IFA_F_OPTIMISTIC);
+       if (!idev->dead && (idev->if_flags & IF_READY) &&
+           !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
+                            IFA_F_OPTIMISTIC)) {
 
                /* If we're not ready, then normal ifup will take care
                 * of this. Otherwise, we need to request our rs here.
index dae1949019d7b8dc77d14c39478571e86ad89f34..d5d20cde8d92808387e171fb25b2fa701a06f275 100644 (file)
@@ -381,9 +381,8 @@ int ip6_forward(struct sk_buff *skb)
         *      cannot be fragmented, because there is no warranty
         *      that different fragments will go along one path. --ANK
         */
-       if (opt->ra) {
-               u8 *ptr = skb_network_header(skb) + opt->ra;
-               if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
+       if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
+               if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
                        return 0;
        }
 
@@ -822,11 +821,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
                                          const struct flowi6 *fl6)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct rt6_info *rt = (struct rt6_info *)dst;
+       struct rt6_info *rt;
 
        if (!dst)
                goto out;
 
+       if (dst->ops->family != AF_INET6) {
+               dst_release(dst);
+               return NULL;
+       }
+
+       rt = (struct rt6_info *)dst;
        /* Yes, checking route validity in not connected
         * case is not very simple. Take into account,
         * that we do not support routing by source, TOS,
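
The ip6_sk_dst_check() change above guards the cast from the generic dst_entry to the IPv6-specific rt6_info: a dual-stack socket can be left holding an IPv4 entry, so the family is checked first and a mismatch simply releases the cached entry instead of treating it as an IPv6 route. The same guarded-downcast shape in plain C (types and field names are illustrative):

    #include <stdio.h>

    enum family { FAM_INET, FAM_INET6 };

    struct dst_like {
            enum family family;
    };

    struct rt6_like {
            struct dst_like dst;    /* first member, as in struct rt6_info */
            int rt6i_flags;
    };

    /* Only downcast to the IPv6-specific type after checking the tag. */
    static struct rt6_like *as_rt6(struct dst_like *d)
    {
            if (!d || d->family != FAM_INET6)
                    return NULL;
            return (struct rt6_like *)d;
    }

    int main(void)
    {
            struct rt6_like r6 = { .dst = { .family = FAM_INET6 } };
            struct dst_like d4 = { .family = FAM_INET };

            printf("v6 entry: %s\n", as_rt6(&r6.dst) ? "accepted" : "rejected");
            printf("v4 entry: %s\n", as_rt6(&d4) ? "accepted" : "rejected");
            return 0;
    }
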
index 2712ab22a174087c09cc705e1f6adec2bd601154..ca4ffcc287f1ebbd3cdb5011660f385479d597d3 100644 (file)
@@ -1493,7 +1493,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
         */
 
        if (ha)
-               ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha);
+               ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha);
 
        /*
         *      build redirect option and copy skb over to the new packet.
index 97bcf2bae8574a1a79d5410e18826f78e2ecc5dd..c9b6a6e6a1e8877dd634b51680fa2d5a5d9f2148 100644 (file)
@@ -204,7 +204,7 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
                if (ct != NULL && !nf_ct_is_untracked(ct)) {
                        help = nfct_help(ct);
                        if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
-                               nf_conntrack_get_reasm(skb);
+                               nf_conntrack_get_reasm(reasm);
                                NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
                                               (struct net_device *)in,
                                               (struct net_device *)out,
index c5fbd7589681cf984220325fff463696163340d0..9da862070dd84fe596f77582c82af6eec8ad660c 100644 (file)
@@ -1710,6 +1710,7 @@ static int key_notify_sa_flush(const struct km_event *c)
        hdr->sadb_msg_version = PF_KEY_V2;
        hdr->sadb_msg_errno = (uint8_t) 0;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+       hdr->sadb_msg_reserved = 0;
 
        pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
 
@@ -2699,6 +2700,7 @@ static int key_notify_policy_flush(const struct km_event *c)
        hdr->sadb_msg_errno = (uint8_t) 0;
        hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+       hdr->sadb_msg_reserved = 0;
        pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
        return 0;
 
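
Both sadb_msg_reserved assignments above close the same hole: the header is assembled field by field in freshly allocated memory and then broadcast to listeners, so any member left unwritten hands stale kernel bytes to userspace. For new code, zeroing the whole header before filling it in avoids the entire class of leak; a sketch in plain C (the struct is illustrative, not struct sadb_msg):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct wire_hdr {
            uint8_t  version;
            uint8_t  type;
            uint8_t  errno_field;
            uint8_t  satype;
            uint16_t len;
            uint16_t reserved;      /* must never carry stale memory */
    };

    /* Zero the whole header first, then fill in the meaningful fields;
     * reserved members and padding can no longer leak allocator garbage. */
    static void fill_hdr(struct wire_hdr *hdr, uint16_t len)
    {
            memset(hdr, 0, sizeof(*hdr));
            hdr->version = 2;
            hdr->len = len;
    }

    int main(void)
    {
            struct wire_hdr hdr;

            fill_hdr(&hdr, 8);
            printf("reserved = %u\n", hdr.reserved);
            return 0;
    }
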
index 1a89c80e6407ff6f487b3d7bf11e553e0e30e229..4fdb306e42e0c1724f7a843b6cc77239d6b065bb 100644 (file)
@@ -1057,6 +1057,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
 
+       if (sdata->wdev.cac_started) {
+               cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
+               cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
+                                  GFP_KERNEL);
+       }
+
        drv_stop_ap(sdata->local, sdata);
 
        /* free all potentially still buffered bcast frames */
index 44be28cfc6c4b17c7ee6251a1670f207ea99922e..9ca8e3278cc0398abb8464c40954e2a9cf73a5cb 100644 (file)
@@ -1497,10 +1497,11 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
        ieee80211_tx_skb_tid(sdata, skb, 7);
 }
 
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action,
+u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                               struct ieee802_11_elems *elems,
                               u64 filter, u32 crc);
-static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action,
+static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
+                                         bool action,
                                          struct ieee802_11_elems *elems)
 {
        ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
index a8c2130c8ba4be03d3fea382b12f1253c3c2e768..741448b308257c3a6f01c3d09ca36d7ddaa31ca3 100644 (file)
@@ -2522,8 +2522,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
        u16 capab_info, aid;
        struct ieee802_11_elems elems;
        struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+       const struct cfg80211_bss_ies *bss_ies = NULL;
+       struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
        u32 changed = 0;
        int err;
+       bool ret;
 
        /* AssocResp and ReassocResp have identical structure */
 
@@ -2554,6 +2557,69 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 
        ifmgd->aid = aid;
 
+       /*
+        * Some APs are erroneously not including some information in their
+        * (re)association response frames. Try to recover by using the data
+        * from the beacon or probe response. This seems to afflict mobile
+        * 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
+        * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
+        */
+       if ((assoc_data->wmm && !elems.wmm_param) ||
+           (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
+            (!elems.ht_cap_elem || !elems.ht_operation)) ||
+           (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
+            (!elems.vht_cap_elem || !elems.vht_operation))) {
+               const struct cfg80211_bss_ies *ies;
+               struct ieee802_11_elems bss_elems;
+
+               rcu_read_lock();
+               ies = rcu_dereference(cbss->ies);
+               if (ies)
+                       bss_ies = kmemdup(ies, sizeof(*ies) + ies->len,
+                                         GFP_ATOMIC);
+               rcu_read_unlock();
+               if (!bss_ies)
+                       return false;
+
+               ieee802_11_parse_elems(bss_ies->data, bss_ies->len,
+                                      false, &bss_elems);
+               if (assoc_data->wmm &&
+                   !elems.wmm_param && bss_elems.wmm_param) {
+                       elems.wmm_param = bss_elems.wmm_param;
+                       sdata_info(sdata,
+                                  "AP bug: WMM param missing from AssocResp\n");
+               }
+
+               /*
+                * Also check if we requested HT/VHT, otherwise the AP doesn't
+                * have to include the IEs in the (re)association response.
+                */
+               if (!elems.ht_cap_elem && bss_elems.ht_cap_elem &&
+                   !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
+                       elems.ht_cap_elem = bss_elems.ht_cap_elem;
+                       sdata_info(sdata,
+                                  "AP bug: HT capability missing from AssocResp\n");
+               }
+               if (!elems.ht_operation && bss_elems.ht_operation &&
+                   !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
+                       elems.ht_operation = bss_elems.ht_operation;
+                       sdata_info(sdata,
+                                  "AP bug: HT operation missing from AssocResp\n");
+               }
+               if (!elems.vht_cap_elem && bss_elems.vht_cap_elem &&
+                   !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
+                       elems.vht_cap_elem = bss_elems.vht_cap_elem;
+                       sdata_info(sdata,
+                                  "AP bug: VHT capa missing from AssocResp\n");
+               }
+               if (!elems.vht_operation && bss_elems.vht_operation &&
+                   !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
+                       elems.vht_operation = bss_elems.vht_operation;
+                       sdata_info(sdata,
+                                  "AP bug: VHT operation missing from AssocResp\n");
+               }
+       }
+
        /*
         * We previously checked these in the beacon/probe response, so
         * they should be present here. This is just a safety net.
@@ -2561,15 +2627,17 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
        if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
            (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
                sdata_info(sdata,
-                          "HT AP is missing WMM params or HT capability/operation in AssocResp\n");
-               return false;
+                          "HT AP is missing WMM params or HT capability/operation\n");
+               ret = false;
+               goto out;
        }
 
        if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
            (!elems.vht_cap_elem || !elems.vht_operation)) {
                sdata_info(sdata,
-                          "VHT AP is missing VHT capability/operation in AssocResp\n");
-               return false;
+                          "VHT AP is missing VHT capability/operation\n");
+               ret = false;
+               goto out;
        }
 
        mutex_lock(&sdata->local->sta_mtx);
@@ -2580,7 +2648,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
        sta = sta_info_get(sdata, cbss->bssid);
        if (WARN_ON(!sta)) {
                mutex_unlock(&sdata->local->sta_mtx);
-               return false;
+               ret = false;
+               goto out;
        }
 
        sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
@@ -2633,7 +2702,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
                           sta->sta.addr);
                WARN_ON(__sta_info_destroy(sta));
                mutex_unlock(&sdata->local->sta_mtx);
-               return false;
+               ret = false;
+               goto out;
        }
 
        mutex_unlock(&sdata->local->sta_mtx);
@@ -2673,7 +2743,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
        ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
        ieee80211_sta_reset_beacon_monitor(sdata);
 
-       return true;
+       ret = true;
+ out:
+       kfree(bss_ies);
+       return ret;
 }
 
 static enum rx_mgmt_action __must_check
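
Two things happen in the association-response workaround above: the candidate BSS's IEs, which are only valid under rcu_read_lock(), are duplicated with kmemdup() so they can be parsed after the lock is dropped, and the early returns become gotos to a single exit that frees that copy on every path. A rough userspace analogue of the snapshot idea, with a rwlock standing in for the RCU read side (names are illustrative):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct ies {
            size_t len;
            unsigned char data[];
    };

    static pthread_rwlock_t ies_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct ies *live_ies;    /* replaced by a writer elsewhere */

    /* Copy the live element while the read lock is held; the caller can
     * then parse and free the private copy without holding any lock. */
    static struct ies *snapshot_ies(void)
    {
            struct ies *copy = NULL;

            pthread_rwlock_rdlock(&ies_lock);
            if (live_ies) {
                    size_t sz = sizeof(*live_ies) + live_ies->len;

                    copy = malloc(sz);
                    if (copy)
                            memcpy(copy, live_ies, sz);
            }
            pthread_rwlock_unlock(&ies_lock);

            return copy;
    }

    int main(void)
    {
            struct ies *copy;

            live_ies = malloc(sizeof(*live_ies) + 4);
            if (!live_ies)
                    return 1;
            live_ies->len = 4;
            memcpy(live_ies->data, "abcd", 4);

            copy = snapshot_ies();
            free(copy);
            free(live_ies);
            return 0;
    }
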
index d3f414fe67e0beb9592e87d344a6b3a5c0fed66b..a02bef35b134e28e6d5ad8d6da38c505c309e756 100644 (file)
@@ -615,7 +615,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
                if (rates[i].idx < 0)
                        break;
 
-               rate_idx_match_mask(&rates[i], sband, mask, chan_width,
+               rate_idx_match_mask(&rates[i], sband, chan_width, mask,
                                    mcs_mask);
        }
 }
index 27e07150eb465824475f9fc15348ed2615e46738..72e6292955bb9eb3b896088d3dc756771e1ed3b4 100644 (file)
@@ -661,12 +661,12 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL(ieee80211_queue_delayed_work);
 
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action,
+u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                               struct ieee802_11_elems *elems,
                               u64 filter, u32 crc)
 {
        size_t left = len;
-       u8 *pos = start;
+       const u8 *pos = start;
        bool calc_crc = filter != 0;
        DECLARE_BITMAP(seen_elems, 256);
        const u8 *ie;
index 05565d2b3a61b530acad48cfeda90b2af4b3adda..23b8eb53a5693d19e17904afa19afe15ff3ba058 100644 (file)
@@ -1442,7 +1442,8 @@ ignore_ipip:
 
        /* do the statistics and put it back */
        ip_vs_in_stats(cp, skb);
-       if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
+       if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
+           IPPROTO_SCTP == cih->protocol)
                offset += 2 * sizeof(__u16);
        verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
 
index 8fe2e99428b724086184d6b201369b6aeda82cf2..355d2ef0809477a36176ff5287bd8bfc25faee8c 100644 (file)
@@ -45,7 +45,7 @@ int nf_connlabel_set(struct nf_conn *ct, u16 bit)
        if (test_bit(bit, labels->bits))
                return 0;
 
-       if (test_and_set_bit(bit, labels->bits))
+       if (!test_and_set_bit(bit, labels->bits))
                nf_conntrack_event_cache(IPCT_LABEL, ct);
 
        return 0;
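
The single-character fix above hinges on test_and_set_bit() returning the bit's previous value: the IPCT_LABEL event should fire when the call returns 0 (the bit was just set) and stay silent when the label was already present. A tiny userspace illustration of that previous-value convention (the helper is a stand-in, not the kernel's atomic bitop):

    #include <stdio.h>

    /* Stand-in for test_and_set_bit(): sets the bit and returns its
     * previous value. */
    static int test_and_set(unsigned long *word, int bit)
    {
            int old = (int)((*word >> bit) & 1);

            *word |= 1UL << bit;
            return old;
    }

    int main(void)
    {
            unsigned long labels = 0;

            if (!test_and_set(&labels, 3))
                    printf("bit 3 newly set: report the event\n");
            if (!test_and_set(&labels, 3))
                    printf("never reached: the bit was already set\n");
            return 0;
    }
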
index 6d0f8a17c5b77d6c02b4df99ceddb02f48954b3e..ecf065f9403213141655410c049c49646498fac5 100644 (file)
@@ -1825,6 +1825,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                        nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
                                                      (1 << IPCT_ASSURED) |
                                                      (1 << IPCT_HELPER) |
+                                                     (1 << IPCT_LABEL) |
                                                      (1 << IPCT_PROTOINFO) |
                                                      (1 << IPCT_NATSEQADJ) |
                                                      (1 << IPCT_MARK),
index 96ccdf78a29ffafb84c5e1229f33487ec28c1429..dac11f73868e50df4e20add9e754d55629716f25 100644 (file)
@@ -230,9 +230,10 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
                                        &ct->tuplehash[!dir].tuple.src.u3,
                                        false);
                        if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
-                                          poff, plen, buffer, buflen))
+                                          poff, plen, buffer, buflen)) {
                                nf_ct_helper_log(skb, ct, "cannot mangle received");
                                return NF_DROP;
+                       }
                }
 
                /* The rport= parameter (RFC 3581) contains the port number
index afaebc766933a70de1198ce66b987ccdb62be9bd..7011c71646f0266eb75c856bc49fea7b5030bd52 100644 (file)
@@ -45,17 +45,22 @@ optlen(const u_int8_t *opt, unsigned int offset)
 
 static int
 tcpmss_mangle_packet(struct sk_buff *skb,
-                    const struct xt_tcpmss_info *info,
+                    const struct xt_action_param *par,
                     unsigned int in_mtu,
                     unsigned int tcphoff,
                     unsigned int minlen)
 {
+       const struct xt_tcpmss_info *info = par->targinfo;
        struct tcphdr *tcph;
        unsigned int tcplen, i;
        __be16 oldval;
        u16 newmss;
        u8 *opt;
 
+       /* This is a fragment, no TCP header is available */
+       if (par->fragoff != 0)
+               return XT_CONTINUE;
+
        if (!skb_make_writable(skb, skb->len))
                return -1;
 
@@ -125,11 +130,17 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 
        skb_put(skb, TCPOLEN_MSS);
 
-       /* RFC 879 states that the default MSS is 536 without specific
-        * knowledge that the destination host is prepared to accept larger.
-        * Since no MSS was provided, we MUST NOT set a value > 536.
+       /*
+        * IPv4: RFC 1122 states "If an MSS option is not received at
+        * connection setup, TCP MUST assume a default send MSS of 536".
+        * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum
+        * length IPv6 header of 60, ergo the default MSS value is 1220
+        * Since no MSS was provided, we must use the default values
         */
-       newmss = min(newmss, (u16)536);
+       if (par->family == NFPROTO_IPV4)
+               newmss = min(newmss, (u16)536);
+       else
+               newmss = min(newmss, (u16)1220);
 
        opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
        memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
@@ -188,7 +199,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        __be16 newlen;
        int ret;
 
-       ret = tcpmss_mangle_packet(skb, par->targinfo,
+       ret = tcpmss_mangle_packet(skb, par,
                                   tcpmss_reverse_mtu(skb, PF_INET),
                                   iph->ihl * 4,
                                   sizeof(*iph) + sizeof(struct tcphdr));
@@ -217,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
        if (tcphoff < 0)
                return NF_DROP;
-       ret = tcpmss_mangle_packet(skb, par->targinfo,
+       ret = tcpmss_mangle_packet(skb, par,
                                   tcpmss_reverse_mtu(skb, PF_INET6),
                                   tcphoff,
                                   sizeof(*ipv6h) + sizeof(struct tcphdr));
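
The new per-family defaults above follow directly from the protocols' minimum sizes; the "60" mentioned in the comment is the 40-byte IPv6 header plus the 20-byte TCP header taken together:

    IPv4: 576 (minimum reassembly size, RFC 1122) - 20 (IPv4 header) - 20 (TCP header) = 536
    IPv6: 1280 (minimum link MTU, RFC 2460)       - 40 (IPv6 header) - 20 (TCP header) = 1220
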
index 1eb1a44bfd3d134452993dfb071aa15481857ad7..b68fa191710fe02bdb1b09f825ee6330c9bf570b 100644 (file)
@@ -48,11 +48,13 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
                return NF_DROP;
 
        len = skb->len - tcphoff;
-       if (len < (int)sizeof(struct tcphdr) ||
-           tcp_hdr(skb)->doff * 4 > len)
+       if (len < (int)sizeof(struct tcphdr))
                return NF_DROP;
 
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+       if (tcph->doff * 4 > len)
+               return NF_DROP;
+
        opt  = (u_int8_t *)tcph;
 
        /*
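
The reordering above is about when each check becomes safe: the buffer has to cover the fixed 20-byte TCP header before tcph->doff can be read at all, and only then can the doff-derived length be compared against what is actually present. The same shape over a plain byte buffer (illustrative, not the netfilter code):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* TCP keeps its header length (in 32-bit words) in the top four bits
     * of byte 12; reading it is only safe once 20 bytes are available. */
    static int header_ok(const uint8_t *pkt, size_t len)
    {
            size_t hdr_len;

            if (len < 20)
                    return 0;       /* too short even for the fixed header */

            hdr_len = (size_t)(pkt[12] >> 4) * 4;
            if (hdr_len < 20 || hdr_len > len)
                    return 0;       /* options would overrun the buffer */

            return 1;
    }

    int main(void)
    {
            uint8_t pkt[40] = { 0 };

            pkt[12] = 5 << 4;       /* header length = 5 words = 20 bytes */
            printf("full packet: %d\n", header_ok(pkt, sizeof(pkt)));
            printf("truncated:   %d\n", header_ok(pkt, 10));
            return 0;
    }
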
index d5aed3bb394545a6a07e15e373156e1ab189f8e2..b14b7e3cb6e65a76517752872753b53264baa7b4 100644 (file)
@@ -1564,12 +1564,17 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
        struct cfg80211_registered_device *dev;
        s64 filter_wiphy = -1;
        bool split = false;
-       struct nlattr **tb = nl80211_fam.attrbuf;
+       struct nlattr **tb;
        int res;
 
+       /* will be zeroed in nlmsg_parse() */
+       tb = kmalloc(sizeof(*tb) * (NL80211_ATTR_MAX + 1), GFP_KERNEL);
+       if (!tb)
+               return -ENOMEM;
+
        mutex_lock(&cfg80211_mutex);
        res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
-                         tb, nl80211_fam.maxattr, nl80211_policy);
+                         tb, NL80211_ATTR_MAX, nl80211_policy);
        if (res == 0) {
                split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
                if (tb[NL80211_ATTR_WIPHY])
@@ -1583,6 +1588,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
                        netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
                        if (!netdev) {
                                mutex_unlock(&cfg80211_mutex);
+                               kfree(tb);
                                return -ENODEV;
                        }
                        if (netdev->ieee80211_ptr) {
@@ -1593,6 +1599,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
                        dev_put(netdev);
                }
        }
+       kfree(tb);
 
        list_for_each_entry(dev, &cfg80211_rdev_list, list) {
                if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
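
The nl80211_dump_wiphy() change above drops the family-wide nl80211_fam.attrbuf in favour of an attribute table allocated and freed per call, so the dump no longer depends on shared scratch state that nlmsg_parse() zeroes and refills on every invocation. Generic shape of that fix in plain C (the parser here is a stub standing in for nlmsg_parse()):

    #include <stdlib.h>
    #include <string.h>

    #define MAX_ATTR 16

    /* Stand-in parser: zeroes and refills the caller-supplied table. */
    static int parse_msg(const char *msg, const char *tb[MAX_ATTR + 1])
    {
            memset(tb, 0, sizeof(*tb) * (MAX_ATTR + 1));
            tb[0] = msg;            /* pretend attribute 0 was present */
            return 0;
    }

    /* Each dump call gets its own attribute table instead of sharing one
     * static buffer, so concurrent callers cannot trample each other. */
    static int handle_dump(const char *msg)
    {
            const char **tb;
            int err;

            tb = calloc(MAX_ATTR + 1, sizeof(*tb));
            if (!tb)
                    return -1;

            err = parse_msg(msg, tb);
            /* ... walk tb[] here ... */
            free(tb);
            return err;
    }

    int main(void)
    {
            return handle_dump("hello");
    }
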
index bd8d46cca2b3132b71a0fc5da99f31f0763d3cc9..cccaf9c7a7bbd29b195b6ae475c2479e528bdd86 100644 (file)
@@ -58,6 +58,7 @@ enum {
        CS420X_GPIO_23,
        CS420X_MBP101,
        CS420X_MBP81,
+       CS420X_MBA42,
        CS420X_AUTO,
        /* aliases */
        CS420X_IMAC27_122 = CS420X_GPIO_23,
@@ -346,6 +347,7 @@ static const struct hda_model_fixup cs420x_models[] = {
        { .id = CS420X_APPLE, .name = "apple" },
        { .id = CS420X_MBP101, .name = "mbp101" },
        { .id = CS420X_MBP81, .name = "mbp81" },
+       { .id = CS420X_MBA42, .name = "mba42" },
        {}
 };
 
@@ -361,6 +363,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
        SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
        SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
+       SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
        SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
        {} /* terminator */
 };
@@ -414,6 +417,20 @@ static const struct hda_pintbl mbp101_pincfgs[] = {
        {} /* terminator */
 };
 
+static const struct hda_pintbl mba42_pincfgs[] = {
+       { 0x09, 0x012b4030 }, /* HP */
+       { 0x0a, 0x400000f0 },
+       { 0x0b, 0x90100120 }, /* speaker */
+       { 0x0c, 0x400000f0 },
+       { 0x0d, 0x90a00110 }, /* mic */
+       { 0x0e, 0x400000f0 },
+       { 0x0f, 0x400000f0 },
+       { 0x10, 0x400000f0 },
+       { 0x12, 0x400000f0 },
+       { 0x15, 0x400000f0 },
+       {} /* terminator */
+};
+
 static void cs420x_fixup_gpio_13(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
 {
@@ -482,6 +499,12 @@ static const struct hda_fixup cs420x_fixups[] = {
                .chained = true,
                .chain_id = CS420X_GPIO_13,
        },
+       [CS420X_MBA42] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = mba42_pincfgs,
+               .chained = true,
+               .chain_id = CS420X_GPIO_13,
+       },
 };
 
 static struct cs_spec *cs_alloc_spec(struct hda_codec *codec, int vendor_nid)
index 02e22b4458d2ed5222c5985ec8bb2593c4a5c405..403010c9e82ea610125c82811e7f1f4ea3d27aac 100644 (file)
@@ -3483,6 +3483,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x05e0, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -3494,6 +3495,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
        SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
@@ -3596,6 +3599,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
        {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
        {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+       {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+       {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
        {}
 };
 
@@ -4275,6 +4280,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
        {.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"},
        {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
        {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
+       {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
        {}
 };
 
index 6e5fed30aa27a00ccfb741519d6cf2ae6cd8bc9c..ce1e1e16f250affafbc333d165c2843e043bfa23 100644 (file)
@@ -257,7 +257,6 @@ static int idma_mmap(struct snd_pcm_substream *substream,
 
        /* From snd_pcm_lib_mmap_iomem */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       vma->vm_flags |= VM_IO;
        size = vma->vm_end - vma->vm_start;
        offset = vma->vm_pgoff << PAGE_SHIFT;
        ret = io_remap_pfn_range(vma, vma->vm_start,
index 1a033177b83f63f94d1be2262ccc0553ac9b8034..64952e2d3ed1e94a12ad9e593a15298e8eaceee7 100644 (file)
@@ -147,14 +147,32 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
                return -EINVAL;
        }
 
+       alts = &iface->altsetting[0];
+       altsd = get_iface_desc(alts);
+
+       /*
+        * Android with both accessory and audio interfaces enabled gets the
+        * interface numbers wrong.
+        */
+       if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) ||
+            chip->usb_id == USB_ID(0x18d1, 0x2d05)) &&
+           interface == 0 &&
+           altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+           altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) {
+               interface = 2;
+               iface = usb_ifnum_to_if(dev, interface);
+               if (!iface)
+                       return -EINVAL;
+               alts = &iface->altsetting[0];
+               altsd = get_iface_desc(alts);
+       }
+
        if (usb_interface_claimed(iface)) {
                snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
                                                dev->devnum, ctrlif, interface);
                return -EINVAL;
        }
 
-       alts = &iface->altsetting[0];
-       altsd = get_iface_desc(alts);
        if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
             altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
            altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
index e5c7f9f20fddbea288de5287ab6923314986570a..d5438083fd6a8072c20275778f1f3b9fa0a42517 100644 (file)
@@ -885,6 +885,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 
        case USB_ID(0x046d, 0x0808):
        case USB_ID(0x046d, 0x0809):
+       case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
        case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
        case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
        case USB_ID(0x046d, 0x0991):